Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/Documentation/devicetree/bindings/net/icplus-ip101ag.txt b/Documentation/devicetree/bindings/net/icplus-ip101ag.txt
new file mode 100644
index 0000000..a784592
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/icplus-ip101ag.txt
@@ -0,0 +1,19 @@
+IC Plus Corp. IP101A / IP101G Ethernet PHYs
+
+There are different models of the IP101G Ethernet PHY:
+- IP101GR (32-pin QFN package)
+- IP101G (die only, no package)
+- IP101GA (48-pin LQFP package)
+
+There are different models of the IP101A Ethernet PHY (which is the
+predecessor of the IP101G):
+- IP101A (48-pin LQFP package)
+- IP101AH (48-pin LQFP package)
+
+Optional properties for the IP101GR (32-pin QFN package):
+
+- icplus,select-rx-error:
+  pin 21 ("RXER/INTR_32") will output the receive error status.
+  Interrupts are not routed outside the PHY in this mode.
+- icplus,select-interrupt:
+  pin 21 ("RXER/INTR_32") will output the interrupt signal.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4b1a2a8..cc6b2c0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -170,6 +170,7 @@
 hwacom	HwaCom Systems Inc.
 i2se	I2SE GmbH
 ibm	International Business Machines (IBM)
+icplus	IC Plus Corp.
 idt	Integrated Device Technologies, Inc.
 ifi	Ingenieurburo Fur Ic-Technologie (I/F/I)
 ilitek	ILI Technology Corporation (ILITEK)
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index bd89dae..6a47629 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -31,6 +31,7 @@
    net_failover
    alias
    bridge
+   snmp_counter
 
 .. only::  subproject
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 32b2157..af2a694 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -370,6 +370,7 @@
 	derived from the listen socket to be bound to the L3 domain in
 	which the packets originated. Only valid when the kernel was
 	compiled with CONFIG_NET_L3_MASTER_DEV.
+        Default: 0 (disabled)
 
 tcp_low_latency - BOOLEAN
 	This is a legacy option, it has no effect anymore.
@@ -758,7 +759,7 @@
 	flows, for typical pfifo_fast qdiscs.  tcp_limit_output_bytes
 	limits the number of bytes on qdisc or device to reduce artificial
 	RTT/cwnd and reduce bufferbloat.
-	Default: 262144
+	Default: 1048576 (16 * 65536)
 
 tcp_challenge_ack_limit - INTEGER
 	Limits number of Challenge ACK sent per second, as recommended
@@ -773,6 +774,7 @@
 	being received regardless of the L3 domain in which they
 	originated. Only valid when the kernel was compiled with
 	CONFIG_NET_L3_MASTER_DEV.
+        Default: 0 (disabled)
 
 udp_mem - vector of 3 INTEGERs: min, pressure, max
 	Number of pages allowed for queueing by all UDP sockets.
@@ -799,6 +801,16 @@
 	total pages of UDP sockets exceed udp_mem pressure. The unit is byte.
 	Default: 4K
 
+RAW variables:
+
+raw_l3mdev_accept - BOOLEAN
+	Enabling this option allows a "global" bound socket to work
+	across L3 master domains (e.g., VRFs) with packets capable of
+	being received regardless of the L3 domain in which they
+	originated. Only valid when the kernel was compiled with
+	CONFIG_NET_L3_MASTER_DEV.
+	Default: 1 (enabled)
+
 CIPSOv4 Variables:
 
 cipso_cache_enable - BOOLEAN
diff --git a/Documentation/networking/ixgbe.rst b/Documentation/networking/ixgbe.rst
index 725fc69..86d887a 100644
--- a/Documentation/networking/ixgbe.rst
+++ b/Documentation/networking/ixgbe.rst
@@ -501,6 +501,19 @@
 
   ip link set <pf dev> vf <vf id> spoofchk {off|on}
 
+IPsec Offload
+-------------
+The ixgbe driver supports IPsec Hardware Offload.  When creating Security
+Associations with "ip xfrm ..." the 'offload' tag option can be used to
+register the IPsec SA with the driver in order to get higher throughput for
+the secure communication.
+
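+For example, an outbound Security Association could be registered for
+offload with a command along these lines (the addresses, SPI, reqid and
+key below are placeholder values, not something required by the
+driver)::
+
+  ip xfrm state add src 192.168.10.1 dst 192.168.10.2 \
+      proto esp spi 0x100 reqid 0x100 mode transport \
+      aead 'rfc4106(gcm(aes))' 0x1234567890123456789012345678901234567890 128 \
+      offload dev eth<x> dir out
+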
+The offload is also supported for ixgbe's VFs, but the VF must be set as
+'trusted' and the support must be enabled with::
+
+  ethtool --set-priv-flags eth<x> vf-ipsec on
+  ip link set eth<x> vf <y> trust on
+
 
 Known Issues/Troubleshooting
 ============================
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index c4a54c1..58dd1c1 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -115,7 +115,7 @@
 
  * Transmit UDP segmentation offload
 
-NETIF_F_GSO_UDP_GSO_L4 accepts a single UDP header with a payload that exceeds
+NETIF_F_GSO_UDP_L4 accepts a single UDP header with a payload that exceeds
 gso_size. On segmentation, it segments the payload on gso_size boundaries and
 replicates the network and UDP headers (fixing up the last one if less than
 gso_size).
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
new file mode 100644
index 0000000..a262d32
--- /dev/null
+++ b/Documentation/networking/snmp_counter.rst
@@ -0,0 +1,745 @@
+============
+SNMP counter
+============
+
+This document explains the meaning of SNMP counters.
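+
+The counters can be read with the nstat tool or directly from
+/proc/net/snmp and /proc/net/netstat. As a minimal sketch (assuming the
+usual layout of those files, i.e. pairs of header/value lines), the
+following Python script collects all counters into one dictionary::
+
+  import pathlib
+
+  def read_counters():
+      counters = {}
+      for path in ('/proc/net/snmp', '/proc/net/netstat'):
+          lines = pathlib.Path(path).read_text().splitlines()
+          # Lines come in pairs: a header line listing counter names and
+          # a value line with the corresponding numbers, e.g.
+          #   TcpExt: SyncookiesSent SyncookiesRecv ...
+          #   TcpExt: 0 0 ...
+          for header, values in zip(lines[0::2], lines[1::2]):
+              prefix, names = header.split(':')
+              numbers = values.split(':')[1].split()
+              for name, number in zip(names.split(), numbers):
+                  counters[prefix + name] = int(number)
+      return counters
+
+  if __name__ == '__main__':
+      snapshot = read_counters()
+      print(snapshot['IpInReceives'], snapshot['TcpInSegs'])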
+
+General IPv4 counters
+=====================
+All layer 4 packets and ICMP packets will change these counters, but
+these counters won't be changed by layer 2 packets (such as STP) or
+ARP packets.
+
+* IpInReceives
+Defined in `RFC1213 ipInReceives`_
+
+.. _RFC1213 ipInReceives: https://tools.ietf.org/html/rfc1213#page-26
+
+The number of packets received by the IP layer. It is increased at the
+beginning of the ip_rcv function and is always updated together with
+IpExtInOctets. It indicates the number of aggregated segments after
+GRO/LRO.
+
+* IpInDelivers
+Defined in `RFC1213 ipInDelivers`_
+
+.. _RFC1213 ipInDelivers: https://tools.ietf.org/html/rfc1213#page-28
+
+The number of packets delivered to the upper layer protocols, e.g. TCP,
+UDP, ICMP and so on. If no one listens on a raw socket, only kernel
+supported protocols will be delivered; if someone listens on a raw
+socket, all valid IP packets will be delivered.
+
+* IpOutRequests
+Defined in `RFC1213 ipOutRequests`_
+
+.. _RFC1213 ipOutRequests: https://tools.ietf.org/html/rfc1213#page-28
+
+The number of packets sent by the IP layer, for both unicast and
+multicast packets. It is always updated together with
+IpExtOutOctets.
+
+* IpExtInOctets and IpExtOutOctets
+They are Linux kernel extensions with no RFC definitions. Please note
+that RFC1213 indeed defines ifInOctets and ifOutOctets, but they are
+different things: ifInOctets and ifOutOctets include the MAC layer
+header size while IpExtInOctets and IpExtOutOctets don't; they only
+include the IP layer header and the IP layer data.
+
+* IpExtInNoECTPkts, IpExtInECT1Pkts, IpExtInECT0Pkts, IpExtInCEPkts
+They indicate the number of the four kinds of ECN IP packets; please
+refer to `Explicit Congestion Notification`_ for more details.
+
+.. _Explicit Congestion Notification: https://tools.ietf.org/html/rfc3168#page-6
+
+These 4 counters calculate how many packets were received for each ECN
+status. They count the real frame number regardless of LRO/GRO. So
+for the same packet, you might find that IpInReceives counts 1, but
+IpExtInNoECTPkts counts 2 or more.
+
+ICMP counters
+=============
+* IcmpInMsgs and IcmpOutMsgs
+Defined by `RFC1213 icmpInMsgs`_ and `RFC1213 icmpOutMsgs`_
+
+.. _RFC1213 icmpInMsgs: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpOutMsgs: https://tools.ietf.org/html/rfc1213#page-43
+
+As mentioned in RFC1213, these two counters include errors; they are
+increased even if the ICMP packet has an invalid type. The ICMP output
+path will check the header of a raw socket, so IcmpOutMsgs would still
+be updated if the IP header is constructed by a userspace program.
+
+* ICMP named types
+| These counters cover most of the common ICMP types; they are:
+| IcmpInDestUnreachs: `RFC1213 icmpInDestUnreachs`_
+| IcmpInTimeExcds: `RFC1213 icmpInTimeExcds`_
+| IcmpInParmProbs: `RFC1213 icmpInParmProbs`_
+| IcmpInSrcQuenchs: `RFC1213 icmpInSrcQuenchs`_
+| IcmpInRedirects: `RFC1213 icmpInRedirects`_
+| IcmpInEchos: `RFC1213 icmpInEchos`_
+| IcmpInEchoReps: `RFC1213 icmpInEchoReps`_
+| IcmpInTimestamps: `RFC1213 icmpInTimestamps`_
+| IcmpInTimestampReps: `RFC1213 icmpInTimestampReps`_
+| IcmpInAddrMasks: `RFC1213 icmpInAddrMasks`_
+| IcmpInAddrMaskReps: `RFC1213 icmpInAddrMaskReps`_
+| IcmpOutDestUnreachs: `RFC1213 icmpOutDestUnreachs`_
+| IcmpOutTimeExcds: `RFC1213 icmpOutTimeExcds`_
+| IcmpOutParmProbs: `RFC1213 icmpOutParmProbs`_
+| IcmpOutSrcQuenchs: `RFC1213 icmpOutSrcQuenchs`_
+| IcmpOutRedirects: `RFC1213 icmpOutRedirects`_
+| IcmpOutEchos: `RFC1213 icmpOutEchos`_
+| IcmpOutEchoReps: `RFC1213 icmpOutEchoReps`_
+| IcmpOutTimestamps: `RFC1213 icmpOutTimestamps`_
+| IcmpOutTimestampReps: `RFC1213 icmpOutTimestampReps`_
+| IcmpOutAddrMasks: `RFC1213 icmpOutAddrMasks`_
+| IcmpOutAddrMaskReps: `RFC1213 icmpOutAddrMaskReps`_
+
+.. _RFC1213 icmpInDestUnreachs: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpInTimeExcds: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpInParmProbs: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInSrcQuenchs: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInRedirects: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInEchos: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInEchoReps: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInTimestamps: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInTimestampReps: https://tools.ietf.org/html/rfc1213#page-43
+.. _RFC1213 icmpInAddrMasks: https://tools.ietf.org/html/rfc1213#page-43
+.. _RFC1213 icmpInAddrMaskReps: https://tools.ietf.org/html/rfc1213#page-43
+
+.. _RFC1213 icmpOutDestUnreachs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutTimeExcds: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutParmProbs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutSrcQuenchs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutRedirects: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutEchos: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutEchoReps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutTimestamps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutTimestampReps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutAddrMasks: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutAddrMaskReps: https://tools.ietf.org/html/rfc1213#page-46
+
+Every ICMP type has two counters: 'In' and 'Out'. E.g., for the ICMP
+Echo packet, they are IcmpInEchos and IcmpOutEchos. Their meanings are
+straightforward: the 'In' counter means the kernel received such a
+packet and the 'Out' counter means the kernel sent such a packet.
+
+* ICMP numeric types
+They are IcmpMsgInType[N] and IcmpMsgOutType[N], where [N] is the
+ICMP type number. These counters track all kinds of ICMP packets. The
+ICMP type number definitions can be found in the `ICMP parameters`_
+document.
+
+.. _ICMP parameters: https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml
+
+For example, if the Linux kernel sends an ICMP Echo packet,
+IcmpMsgOutType8 would increase by 1. And if the kernel gets an ICMP
+Echo Reply packet, IcmpMsgInType0 would increase by 1.
+
+* IcmpInCsumErrors
+This counter indicates that the checksum of the ICMP packet is
+wrong. The kernel verifies the checksum after updating IcmpInMsgs and
+before updating IcmpMsgInType[N]. If a packet has a bad checksum,
+IcmpInMsgs would be updated but none of the IcmpMsgInType[N] would be
+updated.
+
+* IcmpInErrors and IcmpOutErrors
+Defined by `RFC1213 icmpInErrors`_ and `RFC1213 icmpOutErrors`_
+
+.. _RFC1213 icmpInErrors: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpOutErrors: https://tools.ietf.org/html/rfc1213#page-43
+
+When an error occurs in the ICMP packet handling path, these two
+counters would be updated. The receive path uses IcmpInErrors
+and the send path uses IcmpOutErrors. When IcmpInCsumErrors
+is increased, IcmpInErrors would always be increased too.
+
+relationship of the ICMP counters
+---------------------------------
+The sum of IcmpMsgOutType[N] is always equal to IcmpOutMsgs, as they
+are updated at the same time. The sum of IcmpMsgInType[N] plus
+IcmpInErrors should be equal to or larger than IcmpInMsgs. When the
+kernel receives an ICMP packet, the kernel follows the logic below:
+
+1. increase IcmpInMsgs
+2. if there is any error, update IcmpInErrors and finish the process
+3. update IcmpMsgInType[N]
+4. handle the packet depending on the type; if there is any error,
+   update IcmpInErrors and finish the process
+
+So if all errors occur in step (2), IcmpInMsgs should be equal to the
+sum of IcmpMsgInType[N] plus IcmpInErrors. If all errors occur in
+step (4), IcmpInMsgs should be equal to the sum of
+IcmpMsgInType[N]. If errors occur in both step (2) and step (4),
+IcmpInMsgs should be less than the sum of IcmpMsgInType[N] plus
+IcmpInErrors.
+
+General TCP counters
+====================
+* TcpInSegs
+Defined in `RFC1213 tcpInSegs`_
+
+.. _RFC1213 tcpInSegs: https://tools.ietf.org/html/rfc1213#page-48
+
+The number of packets received by the TCP layer. As mentioned in
+RFC1213, it includes the packets received in error, such as checksum
+errors, invalid TCP headers and so on. Only one kind of error is not
+included: when the layer 2 destination address is not the NIC's layer 2
+address. This might happen if the packet is a multicast or broadcast
+packet, or the NIC is in promiscuous mode. In these situations, the
+packets would be delivered to the TCP layer, but the TCP layer will
+discard these packets before increasing TcpInSegs. The TcpInSegs
+counter isn't aware of GRO, so if two packets are merged by GRO, the
+TcpInSegs counter would only increase by 1.
+
+* TcpOutSegs
+Defined in `RFC1213 tcpOutSegs`_
+
+.. _RFC1213 tcpOutSegs: https://tools.ietf.org/html/rfc1213#page-48
+
+The number of packets sent by the TCP layer. As mentioned in RFC1213,
+it excludes the retransmitted packets, but it includes the SYN, ACK
+and RST packets. Unlike TcpInSegs, TcpOutSegs is aware of GSO, so if
+a packet is split into 2 by GSO, TcpOutSegs will increase by 2.
+
+* TcpActiveOpens
+Defined in `RFC1213 tcpActiveOpens`_
+
+.. _RFC1213 tcpActiveOpens: https://tools.ietf.org/html/rfc1213#page-47
+
+It means the TCP layer sent a SYN and came into the SYN-SENT
+state. Every time TcpActiveOpens increases by 1, TcpOutSegs should
+always increase by 1.
+
+* TcpPassiveOpens
+Defined in `RFC1213 tcpPassiveOpens`_
+
+.. _RFC1213 tcpPassiveOpens: https://tools.ietf.org/html/rfc1213#page-47
+
+It means the TCP layer received a SYN, replied with a SYN+ACK and came
+into the SYN-RCVD state.
+
+TCP Fast Path
+=============
+When the kernel receives a TCP packet, it has two paths to handle the
+packet: one is the fast path, the other is the slow path. The comment
+in the kernel code provides a good explanation of them; it is pasted
+below::
+
+  It is split into a fast path and a slow path. The fast path is
+  disabled when:
+
+  - A zero window was announced from us
+  - zero window probing
+    is only handled properly on the slow path.
+  - Out of order segments arrived.
+  - Urgent data is expected.
+  - There is no buffer space left
+  - Unexpected TCP flags/window values/header lengths are received
+    (detected by checking the TCP header against pred_flags)
+  - Data is sent in both directions. The fast path only supports pure senders
+    or pure receivers (this means either the sequence number or the ack
+    value must stay constant)
+  - Unexpected TCP option.
+
+The kernel will try to use the fast path unless any of the above
+conditions is satisfied. If the packets are out of order, the kernel
+will handle them in the slow path, which means the performance might
+not be very good. The kernel would also come into the slow path if the
+"Delayed ACK" is used, because when using "Delayed ACK" the data is
+sent in both directions. When the TCP window scale option is not used,
+the kernel will try to enable the fast path immediately when the
+connection comes into the established state, but if the TCP window
+scale option is used, the kernel will disable the fast path at first,
+and try to enable it after the kernel receives packets.
+
+* TcpExtTCPPureAcks and TcpExtTCPHPAcks
+If a packet has the ACK flag set and no data, it is a pure ACK packet.
+If the kernel handles it in the fast path, TcpExtTCPHPAcks will
+increase by 1; if the kernel handles it in the slow path,
+TcpExtTCPPureAcks will increase by 1.
+
+* TcpExtTCPHPHits
+If a TCP packet has data (which means it is not a pure ACK packet),
+and this packet is handled in the fast path, TcpExtTCPHPHits will
+increase by 1.
+
+
+TCP abort
+=========
+
+
+* TcpExtTCPAbortOnData
+It means the TCP layer has data in flight, but needs to close the
+connection. So the TCP layer sends an RST to the other side, indicating
+that the connection is not closed very gracefully. An easy way to
+increase this counter is using the SO_LINGER option. Please refer to
+the SO_LINGER section of the `socket man page`_:
+
+.. _socket man page: http://man7.org/linux/man-pages/man7/socket.7.html
+
+By default, when an application closes a connection, the close function
+returns immediately and the kernel tries to send the in-flight data
+asynchronously. If you use the SO_LINGER option, set l_onoff to 1 and
+l_linger to a positive number, the close function won't return
+immediately, but waits for the in-flight data to be acked by the other
+side; the max wait time is l_linger seconds. If you set l_onoff to 1
+and l_linger to 0, when the application closes a connection, the kernel
+will send an RST immediately and increase the TcpExtTCPAbortOnData
+counter.
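+
+As a minimal sketch of that last case (assuming a listener such as the
+nc server used in the examples below is reachable on nstat-b port
+9000), the following Python client makes TcpExtTCPAbortOnData increase
+by 1::
+
+  import socket
+  import struct
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  # l_onoff = 1, l_linger = 0: close() aborts the connection with a RST
+  # instead of a FIN, so TcpExtTCPAbortOnData increases by 1.
+  s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
+  s.connect(('nstat-b', 9000))
+  s.send(b'hello')
+  s.close()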
+
+* TcpExtTCPAbortOnClose
+This counter means the application has unread data in the TCP layer
+when the application wants to close the TCP connection. In such a
+situation, the kernel will send an RST to the other side of the TCP
+connection.
+
+* TcpExtTCPAbortOnMemory
+When an application closes a TCP connection, the kernel still needs to
+track the connection to let it complete the TCP disconnect process.
+E.g. an app calls the close method of a socket, the kernel sends a FIN
+to the other side of the connection, and then the app has no
+relationship with the socket any more, but the kernel needs to keep the
+socket; this socket becomes an orphan socket. The kernel waits for the
+reply from the other side, and would come to the TIME_WAIT state
+finally. When the kernel doesn't have enough memory to keep the orphan
+socket, it would send an RST to the other side and delete the socket;
+in such a situation, the kernel will increase TcpExtTCPAbortOnMemory
+by 1. Two conditions would trigger TcpExtTCPAbortOnMemory:
+
+1. the memory used by the TCP protocol is higher than the third value of
+tcp_mem. Please refer to the tcp_mem section in the `TCP man page`_:
+
+.. _TCP man page: http://man7.org/linux/man-pages/man7/tcp.7.html
+
+2. the orphan socket count is higher than net.ipv4.tcp_max_orphans
+
+
+* TcpExtTCPAbortOnTimeout
+This counter will increase when any of the TCP timers expires. In such
+a situation, the kernel won't send an RST; it just gives up the
+connection.
+
+* TcpExtTCPAbortOnLinger
+When a TCP connection comes into the FIN_WAIT_2 state, instead of
+waiting for the FIN packet from the other side, the kernel could send
+an RST and delete the socket immediately. This is not the default
+behavior of the Linux kernel TCP stack. By configuring the TCP_LINGER2
+socket option, you could let the kernel follow this behavior.
+
+* TcpExtTCPAbortFailed
+The kernel TCP layer will send an RST if the conditions in the
+`RFC2525 2.17 section`_ are satisfied. If an internal error occurs
+during this process, TcpExtTCPAbortFailed will be increased.
+
+.. _RFC2525 2.17 section: https://tools.ietf.org/html/rfc2525#page-50
+
+examples
+========
+
+ping test
+---------
+Run the ping command against the public DNS server 8.8.8.8::
+
+  nstatuser@nstat-a:~$ ping 8.8.8.8 -c 1
+  PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
+  64 bytes from 8.8.8.8: icmp_seq=1 ttl=119 time=17.8 ms
+
+  --- 8.8.8.8 ping statistics ---
+  1 packets transmitted, 1 received, 0% packet loss, time 0ms
+  rtt min/avg/max/mdev = 17.875/17.875/17.875/0.000 ms
+
+The nstat result::
+
+  nstatuser@nstat-a:~$ nstat
+  #kernel
+  IpInReceives                    1                  0.0
+  IpInDelivers                    1                  0.0
+  IpOutRequests                   1                  0.0
+  IcmpInMsgs                      1                  0.0
+  IcmpInEchoReps                  1                  0.0
+  IcmpOutMsgs                     1                  0.0
+  IcmpOutEchos                    1                  0.0
+  IcmpMsgInType0                  1                  0.0
+  IcmpMsgOutType8                 1                  0.0
+  IpExtInOctets                   84                 0.0
+  IpExtOutOctets                  84                 0.0
+  IpExtInNoECTPkts                1                  0.0
+
+The Linux server sent an ICMP Echo packet, so IpOutRequests,
+IcmpOutMsgs, IcmpOutEchos and IcmpMsgOutType8 were increased by 1. The
+server got an ICMP Echo Reply from 8.8.8.8, so IpInReceives, IcmpInMsgs,
+IcmpInEchoReps and IcmpMsgInType0 were increased by 1. The ICMP Echo
+Reply was passed to the ICMP layer via the IP layer, so IpInDelivers
+was increased by 1. The default ping data size is 56 bytes, so an ICMP
+Echo packet and its corresponding Echo Reply packet are constructed of:
+
+* 14 bytes MAC header
+* 20 bytes IP header
+* 8 bytes ICMP header
+* 56 bytes data (default value of the ping command)
+
+So the IpExtInOctets and IpExtOutOctets are 20+8+56=84; the MAC header
+is not included.
+
+TCP 3-way handshake
+-------------------
+On the server side, we run::
+
+  nstatuser@nstat-b:~$ nc -lknv 0.0.0.0 9000
+  Listening on [0.0.0.0] (family 0, port 9000)
+
+On the client side, we run::
+
+  nstatuser@nstat-a:~$ nc -nv 192.168.122.251 9000
+  Connection to 192.168.122.251 9000 port [tcp/*] succeeded!
+
+The server listened on TCP port 9000, the client connected to it, and
+they completed the 3-way handshake.
+
+On the server side, we can find the following nstat output::
+
+  nstatuser@nstat-b:~$ nstat | grep -i tcp
+  TcpPassiveOpens                 1                  0.0
+  TcpInSegs                       2                  0.0
+  TcpOutSegs                      1                  0.0
+  TcpExtTCPPureAcks               1                  0.0
+
+On the client side, we can find the following nstat output::
+
+  nstatuser@nstat-a:~$ nstat | grep -i tcp
+  TcpActiveOpens                  1                  0.0
+  TcpInSegs                       1                  0.0
+  TcpOutSegs                      2                  0.0
+
+When the server received the first SYN, it replied with a SYN+ACK and
+came into the SYN-RCVD state, so TcpPassiveOpens increased by 1. The
+server received a SYN, sent a SYN+ACK and received an ACK, so the
+server sent 1 packet and received 2 packets; TcpInSegs increased by 2
+and TcpOutSegs increased by 1. The last ACK of the 3-way handshake is
+a pure ACK without data, so TcpExtTCPPureAcks increased by 1.
+
+When the client sent a SYN, the client came into the SYN-SENT state,
+so TcpActiveOpens increased by 1. The client sent a SYN, received a
+SYN+ACK and sent an ACK, so the client sent 2 packets and received 1
+packet; TcpInSegs increased by 1 and TcpOutSegs increased by 2.
+
+TCP normal traffic
+------------------
+Run nc on server::
+
+  nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
+  Listening on [0.0.0.0] (family 0, port 9000)
+
+Run nc on client::
+
+  nstatuser@nstat-a:~$ nc -v nstat-b 9000
+  Connection to nstat-b 9000 port [tcp/*] succeeded!
+
+Input a string in the nc client ('hello' in our example)::
+
+  nstatuser@nstat-a:~$ nc -v nstat-b 9000
+  Connection to nstat-b 9000 port [tcp/*] succeeded!
+  hello
+
+The client side nstat output::
+
+  nstatuser@nstat-a:~$ nstat
+  #kernel
+  IpInReceives                    1                  0.0
+  IpInDelivers                    1                  0.0
+  IpOutRequests                   1                  0.0
+  TcpInSegs                       1                  0.0
+  TcpOutSegs                      1                  0.0
+  TcpExtTCPPureAcks               1                  0.0
+  TcpExtTCPOrigDataSent           1                  0.0
+  IpExtInOctets                   52                 0.0
+  IpExtOutOctets                  58                 0.0
+  IpExtInNoECTPkts                1                  0.0
+
+The server side nstat output::
+
+  nstatuser@nstat-b:~$ nstat
+  #kernel
+  IpInReceives                    1                  0.0
+  IpInDelivers                    1                  0.0
+  IpOutRequests                   1                  0.0
+  TcpInSegs                       1                  0.0
+  TcpOutSegs                      1                  0.0
+  IpExtInOctets                   58                 0.0
+  IpExtOutOctets                  52                 0.0
+  IpExtInNoECTPkts                1                  0.0
+
+Input a string on the nc client side again ('world' in our example)::
+
+  nstatuser@nstat-a:~$ nc -v nstat-b 9000
+  Connection to nstat-b 9000 port [tcp/*] succeeded!
+  hello
+  world
+
+Client side nstat output::
+
+  nstatuser@nstat-a:~$ nstat
+  #kernel
+  IpInReceives                    1                  0.0
+  IpInDelivers                    1                  0.0
+  IpOutRequests                   1                  0.0
+  TcpInSegs                       1                  0.0
+  TcpOutSegs                      1                  0.0
+  TcpExtTCPHPAcks                 1                  0.0
+  TcpExtTCPOrigDataSent           1                  0.0
+  IpExtInOctets                   52                 0.0
+  IpExtOutOctets                  58                 0.0
+  IpExtInNoECTPkts                1                  0.0
+
+
+Server side nstat output::
+
+  nstatuser@nstat-b:~$ nstat
+  #kernel
+  IpInReceives                    1                  0.0
+  IpInDelivers                    1                  0.0
+  IpOutRequests                   1                  0.0
+  TcpInSegs                       1                  0.0
+  TcpOutSegs                      1                  0.0
+  TcpExtTCPHPHits                 1                  0.0
+  IpExtInOctets                   58                 0.0
+  IpExtOutOctets                  52                 0.0
+  IpExtInNoECTPkts                1                  0.0
+
+Comparing the first client-side nstat and the second client-side nstat,
+we can find one difference: the first one had a 'TcpExtTCPPureAcks',
+but the second one had a 'TcpExtTCPHPAcks'. The first server-side
+nstat and the second server-side nstat had a difference too: the
+second server-side nstat had a TcpExtTCPHPHits, but the first
+server-side nstat didn't have it. The network traffic patterns were
+exactly the same: the client sent a packet to the server and the server
+replied with an ACK. But the kernel handled them in different ways.
+When the TCP window scale option is not used, the kernel will try to
+enable the fast path immediately when the connection comes into the
+established state, but if the TCP window scale option is used, the
+kernel will disable the fast path at first, and try to enable it after
+the kernel receives packets. We can use the 'ss' command to verify
+whether the window scale option is used, e.g. run the below command on
+either the server or the client::
+
+  nstatuser@nstat-a:~$ ss -o state established -i '( dport = :9000 or sport = :9000 )'
+  Netid    Recv-Q     Send-Q            Local Address:Port             Peer Address:Port
+  tcp      0          0               192.168.122.250:40654         192.168.122.251:9000
+             ts sack cubic wscale:7,7 rto:204 rtt:0.98/0.49 mss:1448 pmtu:1500 rcvmss:536 advmss:1448 cwnd:10 bytes_acked:1 segs_out:2 segs_in:1 send 118.2Mbps lastsnd:46572 lastrcv:46572 lastack:46572 pacing_rate 236.4Mbps rcv_space:29200 rcv_ssthresh:29200 minrtt:0.98
+
+The 'wscale:7,7' means both the server and the client set the window
+scale option to 7. Now we can explain the nstat output in our test:
+
+In the first nstat output on the client side, the client sent a packet
+and the server replied with an ACK; when the kernel handled this ACK,
+the fast path was not enabled, so the ACK was counted into
+'TcpExtTCPPureAcks'.
+
+In the second nstat output on the client side, the client sent a packet
+again, and received another ACK from the server; this time, the fast
+path was enabled, and the ACK qualified for the fast path, so it was
+handled by the fast path and counted into 'TcpExtTCPHPAcks'.
+
+In the first nstat output on the server side, the fast path was not
+enabled, so there was no 'TcpExtTCPHPHits'.
+
+In the second nstat output on the server side, the fast path was
+enabled, and the packet received from the client qualified for the fast
+path, so it was counted into 'TcpExtTCPHPHits'.
+
+TcpExtTCPAbortOnClose
+---------------------
+On the server side, we run the following Python script::
+
+  import socket
+  import time
+
+  port = 9000
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.bind(('0.0.0.0', port))
+  s.listen(1)
+  sock, addr = s.accept()
+  while True:
+      time.sleep(9999999)
+
+This Python script listens on port 9000, but doesn't read anything
+from the connection.
+
+On the client side, we send the string "hello" by nc::
+
+  nstatuser@nstat-a:~$ echo "hello" | nc nstat-b 9000
+
+Then we come back to the server side. The server has received the
+"hello" packet, and the TCP layer has acked this packet, but the
+application hasn't read it yet. We type Ctrl-C to terminate the server
+script. Then we can find that TcpExtTCPAbortOnClose increased by 1 on
+the server side::
+
+  nstatuser@nstat-b:~$ nstat | grep -i abort
+  TcpExtTCPAbortOnClose           1                  0.0
+
+If we run tcpdump on the server side, we can find that the server sent
+an RST after we typed Ctrl-C.
+
+TcpExtTCPAbortOnMemory and TcpExtTCPAbortOnTimeout
+--------------------------------------------------
+Below is an example which lets the orphan socket count be higher than
+net.ipv4.tcp_max_orphans.
+Change tcp_max_orphans to a smaller value on the client::
+
+  sudo bash -c "echo 10 > /proc/sys/net/ipv4/tcp_max_orphans"
+
+Client code (creates 64 connections to the server)::
+
+  nstatuser@nstat-a:~$ cat client_orphan.py
+  import socket
+  import time
+
+  server = 'nstat-b' # server address
+  port = 9000
+
+  count = 64
+
+  connection_list = []
+
+  for i in range(64):
+      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+      s.connect((server, port))
+      connection_list.append(s)
+      print("connection_count: %d" % len(connection_list))
+
+  while True:
+      time.sleep(99999)
+
+Server code (accepts 64 connections from the client)::
+
+  nstatuser@nstat-b:~$ cat server_orphan.py
+  import socket
+  import time
+
+  port = 9000
+  count = 64
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.bind(('0.0.0.0', port))
+  s.listen(count)
+  connection_list = []
+  while True:
+      sock, addr = s.accept()
+      connection_list.append((sock, addr))
+      print("connection_count: %d" % len(connection_list))
+
+Run the Python scripts on the server and the client.
+
+On server::
+
+  python3 server_orphan.py
+
+On client::
+
+  python3 client_orphan.py
+
+Run iptables on server::
+
+  sudo iptables -A INPUT -i ens3 -p tcp --destination-port 9000 -j DROP
+
+Type Ctrl-C on the client to stop client_orphan.py.
+
+Check TcpExtTCPAbortOnMemory on client::
+
+  nstatuser@nstat-a:~$ nstat | grep -i abort
+  TcpExtTCPAbortOnMemory          54                 0.0
+
+Check the orphan socket count on the client::
+
+  nstatuser@nstat-a:~$ ss -s
+  Total: 131 (kernel 0)
+  TCP:   14 (estab 1, closed 0, orphaned 10, synrecv 0, timewait 0/0), ports 0
+
+  Transport Total     IP        IPv6
+  *         0         -         -
+  RAW       1         0         1
+  UDP       1         1         0
+  TCP       14        13        1
+  INET      16        14        2
+  FRAG      0         0         0
+
+The explanation of the test: after running server_orphan.py and
+client_orphan.py, we set up 64 connections between the server and the
+client. After running the iptables command, the server will drop all
+packets from the client. When we type Ctrl-C on client_orphan.py, the
+client system tries to close these connections, and before they are
+closed gracefully, these connections become orphan sockets. As the
+iptables rule on the server blocks packets from the client, the server
+won't receive a FIN from the client, so all connections on the client
+would be stuck in the FIN_WAIT_1 state and kept as orphan sockets until
+they time out. We have echoed 10 to /proc/sys/net/ipv4/tcp_max_orphans,
+so the client system would only keep 10 orphan sockets; for all the
+other orphan sockets, the client system sent an RST and deleted them.
+We have 64 connections, so the 'ss -s' command shows the system has 10
+orphan sockets, and the value of TcpExtTCPAbortOnMemory was 54.
+
+An additional explanation about the orphan socket count: you can find
+the exact orphan socket count with the 'ss -s' command, but when the
+kernel decides whether to increase TcpExtTCPAbortOnMemory and send an
+RST, the kernel doesn't always check the exact orphan socket count. To
+increase performance, the kernel checks an approximate count first; if
+the approximate count is more than tcp_max_orphans, the kernel checks
+the exact count again. So if the approximate count is less than
+tcp_max_orphans, but the exact count is more than tcp_max_orphans, you
+would find that TcpExtTCPAbortOnMemory is not increased at all. If
+tcp_max_orphans is large enough, this won't occur, but if you decrease
+tcp_max_orphans to a small value like in our test, you might find this
+issue. So in our test, the client set up 64 connections although
+tcp_max_orphans is 10. If the client only set up 11 connections, we
+might not see TcpExtTCPAbortOnMemory change.
+
+Continuing the previous test, we wait for several minutes. Because the
+iptables rule on the server blocked the traffic, the server wouldn't
+receive a FIN, and all the client's orphan sockets would finally time
+out in the FIN_WAIT_1 state. So after waiting for a few minutes, we can
+find 10 timeouts on the client::
+
+  nstatuser@nstat-a:~$ nstat | grep -i abort
+  TcpExtTCPAbortOnTimeout         10                 0.0
+
+TcpExtTCPAbortOnLinger
+----------------------
+The server side code::
+
+  nstatuser@nstat-b:~$ cat server_linger.py
+  import socket
+  import time
+
+  port = 9000
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.bind(('0.0.0.0', port))
+  s.listen(1)
+  sock, addr = s.accept()
+  while True:
+      time.sleep(9999999)
+
+The client side code::
+
+  nstatuser@nstat-a:~$ cat client_linger.py
+  import socket
+  import struct
+
+  server = 'nstat-b' # server address
+  port = 9000
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 10))
+  s.setsockopt(socket.SOL_TCP, socket.TCP_LINGER2, struct.pack('i', -1))
+  s.connect((server, port))
+  s.close()
+
+Run server_linger.py on server::
+
+  nstatuser@nstat-b:~$ python3 server_linger.py
+
+Run client_linger.py on client::
+
+  nstatuser@nstat-a:~$ python3 client_linger.py
+
+After running client_linger.py, check the output of nstat::
+
+  nstatuser@nstat-a:~$ nstat | grep -i abort
+  TcpExtTCPAbortOnLinger          1                  0.0
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index 8ff7b4c..a5f103b 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -103,19 +103,33 @@
 
 or to specify the output device using cmsg and IP_PKTINFO.
 
+By default the scope of the port bindings for unbound sockets is
+limited to the default VRF. That is, it will not be matched by packets
+arriving on interfaces enslaved to an l3mdev and processes may bind to
+the same port if they bind to an l3mdev.
+
 TCP & UDP services running in the default VRF context (ie., not bound
 to any VRF device) can work across all VRF domains by enabling the
 tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
+
     sysctl -w net.ipv4.tcp_l3mdev_accept=1
     sysctl -w net.ipv4.udp_l3mdev_accept=1
 
+These options are disabled by default so that a socket in a VRF is only
+selected for packets in that VRF. There is a similar option for RAW
+sockets, which is enabled by default for reasons of backwards compatibility.
+This is so that the output device can be specified with cmsg and IP_PKTINFO
+while using a socket not bound to the corresponding VRF; it allows e.g. older
+ping implementations to specify the device without being executed in the VRF.
+This option can be disabled so that packets received in a VRF
+context are only handled by a raw socket bound to the VRF, and packets in the
+default VRF are only handled by a socket not bound to any VRF:
+
+    sysctl -w net.ipv4.raw_l3mdev_accept=0
+
 netfilter rules on the VRF device can be used to limit access to services
 running in the default VRF context as well.
 
-The default VRF does not have limited scope with respect to port bindings.
-That is, if a process does a wildcard bind to a port in the default VRF it
-owns the port across all VRF domains within the network namespace.
-
 ################################################################################
 
 Using iproute2 for VRFs
diff --git a/MAINTAINERS b/MAINTAINERS
index 03c46f4..8131997 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7868,13 +7868,6 @@
 F:	include/uapi/linux/isdn.h
 F:	include/uapi/linux/isdn/
 
-ISDN SUBSYSTEM (Eicon active card driver)
-M:	Armin Schindler <mac@melware.de>
-L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
-W:	http://www.melware.de
-S:	Maintained
-F:	drivers/isdn/hardware/eicon/
-
 IT87 HARDWARE MONITORING DRIVER
 M:	Jean Delvare <jdelvare@suse.com>
 L:	linux-hwmon@vger.kernel.org
@@ -10704,6 +10697,14 @@
 S:	Supported
 F:	drivers/nfc/nxp-nci
 
+OBJAGG
+M:	Jiri Pirko <jiri@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	lib/objagg.c
+F:	lib/test_objagg.c
+F:	include/linux/objagg.h
+
 OBJTOOL
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
 M:	Peter Zijlstra <peterz@infradead.org>
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 4d8cb9b..3a0e34f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1159,19 +1159,19 @@ static int build_body(struct jit_ctx *ctx)
 			emit_load(r_A, r_skb, off, ctx);
 			break;
 		case BPF_ANC | SKF_AD_VLAN_TAG:
-		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  vlan_tci) != 2);
 			off = offsetof(struct sk_buff, vlan_tci);
-			emit_half_load_unsigned(r_s0, r_skb, off, ctx);
-			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
-				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
-			} else {
-				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
-				/* return 1 if present */
-				emit_sltu(r_A, r_zero, r_A, ctx);
-			}
+			emit_half_load_unsigned(r_A, r_skb, off, ctx);
+			break;
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+			ctx->flags |= SEEN_SKB | SEEN_A;
+			emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
+			if (PKT_VLAN_PRESENT_BIT)
+				emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
+			if (PKT_VLAN_PRESENT_BIT < 7)
+				emit_andi(r_A, r_A, 1, ctx);
 			break;
 		case BPF_ANC | SKF_AD_PKTTYPE:
 			ctx->flags |= SEEN_SKB;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d5bfe24..91d223c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -379,18 +379,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 							  hash));
 			break;
 		case BPF_ANC | SKF_AD_VLAN_TAG:
-		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  vlan_tci));
-			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
-				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
-			} else {
-				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
-				PPC_SRWI(r_A, r_A, 12);
-			}
+			break;
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+			PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
+			if (PKT_VLAN_PRESENT_BIT)
+				PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
+			if (PKT_VLAN_PRESENT_BIT < 7)
+				PPC_ANDI(r_A, r_A, 1);
 			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index a5ff886..84cc8f7 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -552,15 +552,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
 				emit_skb_load32(hash, r_A);
 				break;
 			case BPF_ANC | SKF_AD_VLAN_TAG:
-			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 				emit_skb_load16(vlan_tci, r_A);
-				if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
-					emit_alu_K(SRL, 12);
+				break;
+			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+				__emit_skb_load8(__pkt_vlan_present_offset, r_A);
+				if (PKT_VLAN_PRESENT_BIT)
+					emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
+				if (PKT_VLAN_PRESENT_BIT < 7)
 					emit_andi(r_A, 1, r_A);
-				} else {
-					emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
-					emit_and(r_A, r_TMP, r_A);
-				}
 				break;
 			case BPF_LD | BPF_W | BPF_LEN:
 				emit_skb_load32(len, r_A);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f55ffde..14053e0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -754,8 +754,8 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
 
 	regs = of_get_property(op->dev.of_node, "reg", NULL);
 
-	return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n",
-		       (regs ? regs->which_io : 0), op->dev.of_node->name);
+	return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
+		       (regs ? regs->which_io : 0), op->dev.of_node);
 }
 
 static const struct fore200e_bus fore200e_sbus_ops = {
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 20209e2..228b91b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1074,8 +1074,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
 			port_id * step;
 	csk->sndbuf = newsk->sk_sndbuf;
-	csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
-					 cxgb4_port_viid(ndev));
+	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
 	tp->rcv_wnd = select_rcv_wnd(csk);
 	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
 					   WSCALE_OK(tp),
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 615413b..97ecc8c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2058,8 +2058,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		}
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
-						cxgb4_port_viid(pdev));
+		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
 		step = cdev->rdev.lldi.ntxq /
 			cdev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -2078,8 +2077,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 			goto out;
 		ep->mtu = dst_mtu(dst);
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
-						cxgb4_port_viid(pdev));
+		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
 		step = cdev->rdev.lldi.ntxq /
 			cdev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -3944,7 +3942,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	} else {
 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
 		iph = (struct iphdr *)(vlan_eh + 1);
-		skb->vlan_tci = ntohs(cpl->vlan);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
 	}
 
 	if (iph->version != 0x4)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 771eb6b..4b3999d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -404,7 +404,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 	if (pdata)
 		pd_len = pdata->size;
 
-	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+	if (cm_node->vlan_id <= VLAN_VID_MASK)
 		eth_hlen += 4;
 
 	if (cm_node->ipv4)
@@ -433,7 +433,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 
 		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
 		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
-		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+		if (cm_node->vlan_id <= VLAN_VID_MASK) {
 			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
 			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
 			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -463,7 +463,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 
 		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
 		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
-		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+		if (cm_node->vlan_id <= VLAN_VID_MASK) {
 			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
 			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
 			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -3323,7 +3323,7 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
 
 	tcp_info->flow_label = 0;
 	tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
-	if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+	if (cm_node->vlan_id <= VLAN_VID_MASK) {
 		tcp_info->insert_vlan_tag = true;
 		tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
 						  cm_node->vlan_id);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 82adc0d..4351234 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -181,6 +181,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
+	void *buf_addr;
 	int err;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -211,6 +212,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 			goto err_cq;
 		}
 
+		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
+
 		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
 					  ucmd.buf_addr, entries);
 		if (err)
@@ -237,6 +240,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 		if (err)
 			goto err_db;
 
+		buf_addr = &cq->buf.buf;
+
 		uar = &dev->priv_uar;
 		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
 	}
@@ -246,7 +251,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
 			    cq->db.dma, &cq->mcq, vector, 0,
-			    !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
+			    !!(cq->create_flags &
+			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
+			    buf_addr, !!context);
 	if (err)
 		goto err_dbmap;
 
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index e96ffff..fc0c191 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -223,11 +223,11 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
 		}
 
 		old_skb = skb;
-		skb = skb->next;
+		skb = skb_peek_next(skb, &nesqp->pau_list);
 		skb_unlink(old_skb, &nesqp->pau_list);
 		nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
 		nes_rem_ref_cm_node(nesqp->cm_node);
-		if (skb == (struct sk_buff *)&nesqp->pau_list)
+		if (!skb)
 			goto out;
 	}
 	return skb;
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 30d028d..95c4030 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -5,5 +5,3 @@
 
 source "drivers/isdn/hardware/avm/Kconfig"
 
-source "drivers/isdn/hardware/eicon/Kconfig"
-
diff --git a/drivers/isdn/hardware/Makefile b/drivers/isdn/hardware/Makefile
index a5d8fce..e503032 100644
--- a/drivers/isdn/hardware/Makefile
+++ b/drivers/isdn/hardware/Makefile
@@ -3,5 +3,4 @@
 # Object files in subdirectories
 
 obj-$(CONFIG_CAPI_AVM)		+= avm/
-obj-$(CONFIG_CAPI_EICON)	+= eicon/
 obj-$(CONFIG_MISDN)		+= mISDN/
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
deleted file mode 100644
index 6082b6a..0000000
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# ISDN DIVAS Eicon driver
-#
-
-menuconfig CAPI_EICON
-	bool "Active Eicon DIVA Server cards"
-	help
-	  Enable support for Eicon Networks active ISDN cards.
-
-if CAPI_EICON
-
-config ISDN_DIVAS
-	tristate "Support Eicon DIVA Server cards"
-	depends on PROC_FS && PCI
-	help
-	  Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
-	  In order to use this card, additional firmware is necessary, which
-	  has to be downloaded into the card using the divactrl utility.
-
-if ISDN_DIVAS
-
-config ISDN_DIVAS_BRIPCI
-	bool "DIVA Server BRI/PCI support"
-	help
-	  Enable support for DIVA Server BRI-PCI.
-
-config ISDN_DIVAS_PRIPCI
-	bool "DIVA Server PRI/PCI support"
-	help
-	  Enable support for DIVA Server PRI-PCI.
-
-config ISDN_DIVAS_DIVACAPI
-	tristate "DIVA CAPI2.0 interface support"
-	help
-	  You need this to provide the CAPI interface
-	  for DIVA Server cards.
-
-config ISDN_DIVAS_USERIDI
-	tristate "DIVA User-IDI interface support"
-	help
-	  Enable support for user-mode IDI interface.
-
-config ISDN_DIVAS_MAINT
-	tristate "DIVA Maint driver support"
-	depends on m
-	help
-	  Enable Divas Maintenance driver.
-
-endif # ISDN_DIVAS
-
-endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/Makefile b/drivers/isdn/hardware/eicon/Makefile
deleted file mode 100644
index a0ab2e2..0000000
--- a/drivers/isdn/hardware/eicon/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the Eicon DIVA ISDN drivers.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DIVAS)		+= divadidd.o divas.o
-obj-$(CONFIG_ISDN_DIVAS_MAINT)		+= diva_mnt.o
-obj-$(CONFIG_ISDN_DIVAS_USERIDI)	+= diva_idi.o
-obj-$(CONFIG_ISDN_DIVAS_DIVACAPI)	+= divacapi.o
-
-# Multipart objects. 
-
-divas-y					:= divasmain.o divasfunc.o di.o io.o istream.o \
-					   diva.o divasproc.o diva_dma.o
-divas-$(CONFIG_ISDN_DIVAS_BRIPCI)	+= os_bri.o  s_bri.o os_4bri.o s_4bri.o
-divas-$(CONFIG_ISDN_DIVAS_PRIPCI)	+= os_pri.o  s_pri.o
-
-divacapi-y				:= capimain.o capifunc.o message.o capidtmf.o
-
-divadidd-y				:= diva_didd.o diddfunc.o dadapter.o
-
-diva_mnt-y				:= divamnt.o mntfunc.o debug.o maintidi.o
-
-diva_idi-y				:= divasi.o idifunc.o um_idi.o dqueue.o
diff --git a/drivers/isdn/hardware/eicon/adapter.h b/drivers/isdn/hardware/eicon/adapter.h
deleted file mode 100644
index f9b24eb..0000000
--- a/drivers/isdn/hardware/eicon/adapter.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: adapter.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_IDI_ADAPTER_H__
-#define __DIVA_USER_MODE_IDI_ADAPTER_H__
-
-#define DIVA_UM_IDI_ADAPTER_REMOVED 0x00000001
-
-typedef struct _diva_um_idi_adapter {
-	struct list_head link;
-	DESCRIPTOR d;
-	int adapter_nr;
-	struct list_head entity_q;	/* entities linked to this adapter */
-	dword status;
-} diva_um_idi_adapter_t;
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/capi20.h b/drivers/isdn/hardware/eicon/capi20.h
deleted file mode 100644
index 391e417..0000000
--- a/drivers/isdn/hardware/eicon/capi20.h
+++ /dev/null
@@ -1,699 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _INC_CAPI20
-#define _INC_CAPI20
-/* operations on message queues                             */
-/* the common device type for CAPI20 drivers */
-#define FILE_DEVICE_CAPI20 0x8001
-/* DEVICE_CONTROL codes for user and kernel mode applications */
-#define CAPI20_CTL_REGISTER             0x0801
-#define CAPI20_CTL_RELEASE              0x0802
-#define CAPI20_CTL_GET_MANUFACTURER     0x0805
-#define CAPI20_CTL_GET_VERSION          0x0806
-#define CAPI20_CTL_GET_SERIAL           0x0807
-#define CAPI20_CTL_GET_PROFILE          0x0808
-/* INTERNAL_DEVICE_CONTROL codes for kernel mode applicatios only */
-#define CAPI20_CTL_PUT_MESSAGE          0x0803
-#define CAPI20_CTL_GET_MESSAGE          0x0804
-/* the wrapped codes as required by the system */
-#define CAPI_CTL_CODE(f, m)             CTL_CODE(FILE_DEVICE_CAPI20, f, m, FILE_ANY_ACCESS)
-#define IOCTL_CAPI_REGISTER             CAPI_CTL_CODE(CAPI20_CTL_REGISTER, METHOD_BUFFERED)
-#define IOCTL_CAPI_RELEASE              CAPI_CTL_CODE(CAPI20_CTL_RELEASE, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_MANUFACTURER     CAPI_CTL_CODE(CAPI20_CTL_GET_MANUFACTURER, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_VERSION          CAPI_CTL_CODE(CAPI20_CTL_GET_VERSION, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_SERIAL           CAPI_CTL_CODE(CAPI20_CTL_GET_SERIAL, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_PROFILE          CAPI_CTL_CODE(CAPI20_CTL_GET_PROFILE, METHOD_BUFFERED)
-#define IOCTL_CAPI_PUT_MESSAGE          CAPI_CTL_CODE(CAPI20_CTL_PUT_MESSAGE, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_MESSAGE          CAPI_CTL_CODE(CAPI20_CTL_GET_MESSAGE, METHOD_BUFFERED)
-struct divas_capi_register_params  {
-	word MessageBufferSize;
-	word maxLogicalConnection;
-	word maxBDataBlocks;
-	word maxBDataLen;
-};
-struct divas_capi_version  {
-	word CapiMajor;
-	word CapiMinor;
-	word ManuMajor;
-	word ManuMinor;
-};
-typedef struct api_profile_s {
-	word          Number;
-	word          Channels;
-	dword         Global_Options;
-	dword         B1_Protocols;
-	dword         B2_Protocols;
-	dword         B3_Protocols;
-} API_PROFILE;
-/* ISDN Common API message types                            */
-#define _ALERT_R                        0x8001
-#define _CONNECT_R                      0x8002
-#define _CONNECT_I                      0x8202
-#define _CONNECT_ACTIVE_I               0x8203
-#define _DISCONNECT_R                   0x8004
-#define _DISCONNECT_I                   0x8204
-#define _LISTEN_R                       0x8005
-#define _INFO_R                         0x8008
-#define _INFO_I                         0x8208
-#define _SELECT_B_REQ                   0x8041
-#define _FACILITY_R                     0x8080
-#define _FACILITY_I                     0x8280
-#define _CONNECT_B3_R                   0x8082
-#define _CONNECT_B3_I                   0x8282
-#define _CONNECT_B3_ACTIVE_I            0x8283
-#define _DISCONNECT_B3_R                0x8084
-#define _DISCONNECT_B3_I                0x8284
-#define _DATA_B3_R                      0x8086
-#define _DATA_B3_I                      0x8286
-#define _RESET_B3_R                     0x8087
-#define _RESET_B3_I                     0x8287
-#define _CONNECT_B3_T90_ACTIVE_I        0x8288
-#define _MANUFACTURER_R                 0x80ff
-#define _MANUFACTURER_I                 0x82ff
-/* OR this to convert a REQUEST to a CONFIRM                */
-#define CONFIRM                 0x0100
-/* OR this to convert a INDICATION to a RESPONSE            */
-#define RESPONSE                0x0100
-/*------------------------------------------------------------------*/
-/* diehl isdn private MANUFACTURER codes                            */
-/*------------------------------------------------------------------*/
-#define _DI_MANU_ID             0x44444944
-#define _DI_ASSIGN_PLCI         0x0001
-#define _DI_ADV_CODEC           0x0002
-#define _DI_DSP_CTRL            0x0003
-#define _DI_SIG_CTRL            0x0004
-#define _DI_RXT_CTRL            0x0005
-#define _DI_IDI_CTRL            0x0006
-#define _DI_CFG_CTRL            0x0007
-#define _DI_REMOVE_CODEC        0x0008
-#define _DI_OPTIONS_REQUEST     0x0009
-#define _DI_SSEXT_CTRL          0x000a
-#define _DI_NEGOTIATE_B3        0x000b
-/*------------------------------------------------------------------*/
-/* parameter structures                                             */
-/*------------------------------------------------------------------*/
-/* ALERT-REQUEST                                            */
-typedef struct {
-	byte structs[0];      /* Additional Info */
-} _ALT_REQP;
-/* ALERT-CONFIRM                                            */
-typedef struct {
-	word Info;
-} _ALT_CONP;
-/* CONNECT-REQUEST                                          */
-typedef struct {
-	word CIP_Value;
-	byte structs[0];      /* Called party number,
-				 Called party subaddress,
-				 Calling party number,
-				 Calling party subaddress,
-				 B_protocol,
-				 BC,
-				 LLC,
-				 HLC,
-				 Additional Info */
-} _CON_REQP;
-/* CONNECT-CONFIRM                                          */
-typedef struct {
-	word Info;
-} _CON_CONP;
-/* CONNECT-INDICATION                                       */
-typedef struct {
-	word CIP_Value;
-	byte structs[0];      /* Called party number,
-				 Called party subaddress,
-				 Calling party number,
-				 Calling party subaddress,
-				 BC,
-				 LLC,
-				 HLC,
-				 Additional Info */
-} _CON_INDP;
-/* CONNECT-RESPONSE                                         */
-typedef struct {
-	word Accept;
-	byte structs[0];      /* B_protocol,
-				 Connected party number,
-				 Connected party subaddress,
-				 LLC */
-} _CON_RESP;
-/* CONNECT-ACTIVE-INDICATION                                */
-typedef struct {
-	byte structs[0];      /* Connected party number,
-				 Connected party subaddress,
-				 LLC */
-} _CON_A_INDP;
-/* CONNECT-ACTIVE-RESPONSE                                  */
-typedef struct {
-	byte structs[0];      /* empty */
-} _CON_A_RESP;
-/* DISCONNECT-REQUEST                                       */
-typedef struct {
-	byte structs[0];      /* Additional Info */
-} _DIS_REQP;
-/* DISCONNECT-CONFIRM                                       */
-typedef struct {
-	word Info;
-} _DIS_CONP;
-/* DISCONNECT-INDICATION                                    */
-typedef struct {
-	word Info;
-} _DIS_INDP;
-/* DISCONNECT-RESPONSE                                      */
-typedef struct {
-	byte structs[0];      /* empty */
-} _DIS_RESP;
-/* LISTEN-REQUEST                                           */
-typedef struct {
-	dword Info_Mask;
-	dword CIP_Mask;
-	byte structs[0];      /* Calling party number,
-				 Calling party subaddress */
-} _LIS_REQP;
-/* LISTEN-CONFIRM                                           */
-typedef struct {
-	word Info;
-} _LIS_CONP;
-/* INFO-REQUEST                                             */
-typedef struct {
-	byte structs[0];      /* Called party number,
-				 Additional Info */
-} _INF_REQP;
-/* INFO-CONFIRM                                             */
-typedef struct {
-	word Info;
-} _INF_CONP;
-/* INFO-INDICATION                                          */
-typedef struct {
-	word Number;
-	byte structs[0];      /* Info element */
-} _INF_INDP;
-/* INFO-RESPONSE                                            */
-typedef struct {
-	byte structs[0];      /* empty */
-} _INF_RESP;
-/* SELECT-B-REQUEST                                         */
-typedef struct {
-	byte structs[0];      /* B-protocol */
-} _SEL_B_REQP;
-/* SELECT-B-CONFIRM                                         */
-typedef struct {
-	word Info;
-} _SEL_B_CONP;
-/* FACILITY-REQUEST */
-typedef struct {
-	word Selector;
-	byte structs[0];      /* Facility parameters */
-} _FAC_REQP;
-/* FACILITY-CONFIRM STRUCT FOR SUPPLEMENT. SERVICES */
-typedef struct {
-	byte  struct_length;
-	word  function;
-	byte  length;
-	word  SupplementaryServiceInfo;
-	dword SupportedServices;
-} _FAC_CON_STRUCTS;
-/* FACILITY-CONFIRM */
-typedef struct {
-	word Info;
-	word Selector;
-	byte structs[0];      /* Facility parameters */
-} _FAC_CONP;
-/* FACILITY-INDICATION */
-typedef struct {
-	word Selector;
-	byte structs[0];      /* Facility parameters */
-} _FAC_INDP;
-/* FACILITY-RESPONSE */
-typedef struct {
-	word Selector;
-	byte structs[0];      /* Facility parameters */
-} _FAC_RESP;
-/* CONNECT-B3-REQUEST                                       */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _CON_B3_REQP;
-/* CONNECT-B3-CONFIRM                                       */
-typedef struct {
-	word Info;
-} _CON_B3_CONP;
-/* CONNECT-B3-INDICATION                                    */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _CON_B3_INDP;
-/* CONNECT-B3-RESPONSE                                      */
-typedef struct {
-	word Accept;
-	byte structs[0];      /* NCPI */
-} _CON_B3_RESP;
-/* CONNECT-B3-ACTIVE-INDICATION                             */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _CON_B3_A_INDP;
-/* CONNECT-B3-ACTIVE-RESPONSE                               */
-typedef struct {
-	byte structs[0];      /* empty */
-} _CON_B3_A_RESP;
-/* DISCONNECT-B3-REQUEST                                    */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _DIS_B3_REQP;
-/* DISCONNECT-B3-CONFIRM                                    */
-typedef struct {
-	word Info;
-} _DIS_B3_CONP;
-/* DISCONNECT-B3-INDICATION                                 */
-typedef struct {
-	word Info;
-	byte structs[0];      /* NCPI */
-} _DIS_B3_INDP;
-/* DISCONNECT-B3-RESPONSE                                   */
-typedef struct {
-	byte structs[0];      /* empty */
-} _DIS_B3_RESP;
-/* DATA-B3-REQUEST                                          */
-typedef struct {
-	dword         Data;
-	word          Data_Length;
-	word          Number;
-	word          Flags;
-} _DAT_B3_REQP;
-/* DATA-B3-REQUEST 64 BIT Systems                           */
-typedef struct {
-	dword         Data;
-	word          Data_Length;
-	word          Number;
-	word          Flags;
-	void          *pData;
-} _DAT_B3_REQ64P;
-/* DATA-B3-CONFIRM                                          */
-typedef struct {
-	word          Number;
-	word          Info;
-} _DAT_B3_CONP;
-/* DATA-B3-INDICATION                                       */
-typedef struct {
-	dword         Data;
-	word          Data_Length;
-	word          Number;
-	word          Flags;
-} _DAT_B3_INDP;
-/* DATA-B3-INDICATION  64 BIT Systems                       */
-typedef struct {
-	dword         Data;
-	word          Data_Length;
-	word          Number;
-	word          Flags;
-	void          *pData;
-} _DAT_B3_IND64P;
-/* DATA-B3-RESPONSE                                         */
-typedef struct {
-	word          Number;
-} _DAT_B3_RESP;
-/* RESET-B3-REQUEST                                         */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _RES_B3_REQP;
-/* RESET-B3-CONFIRM                                         */
-typedef struct {
-	word Info;
-} _RES_B3_CONP;
-/* RESET-B3-INDICATION                                      */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _RES_B3_INDP;
-/* RESET-B3-RESPONSE                                        */
-typedef struct {
-	byte structs[0];      /* empty */
-} _RES_B3_RESP;
-/* CONNECT-B3-T90-ACTIVE-INDICATION                         */
-typedef struct {
-	byte structs[0];      /* NCPI */
-} _CON_B3_T90_A_INDP;
-/* CONNECT-B3-T90-ACTIVE-RESPONSE                           */
-typedef struct {
-	word Reject;
-	byte structs[0];      /* NCPI */
-} _CON_B3_T90_A_RESP;
-/*------------------------------------------------------------------*/
-/* message structure                                                */
-/*------------------------------------------------------------------*/
-typedef struct _API_MSG CAPI_MSG;
-typedef struct _MSG_HEADER CAPI_MSG_HEADER;
-struct _API_MSG {
-	struct _MSG_HEADER {
-		word        length;
-		word        appl_id;
-		word        command;
-		word        number;
-		byte        controller;
-		byte        plci;
-		word        ncci;
-	} header;
-	union {
-		_ALT_REQP           alert_req;
-		_ALT_CONP           alert_con;
-		_CON_REQP           connect_req;
-		_CON_CONP           connect_con;
-		_CON_INDP           connect_ind;
-		_CON_RESP           connect_res;
-		_CON_A_INDP         connect_a_ind;
-		_CON_A_RESP         connect_a_res;
-		_DIS_REQP           disconnect_req;
-		_DIS_CONP           disconnect_con;
-		_DIS_INDP           disconnect_ind;
-		_DIS_RESP           disconnect_res;
-		_LIS_REQP           listen_req;
-		_LIS_CONP           listen_con;
-		_INF_REQP           info_req;
-		_INF_CONP           info_con;
-		_INF_INDP           info_ind;
-		_INF_RESP           info_res;
-		_SEL_B_REQP         select_b_req;
-		_SEL_B_CONP         select_b_con;
-		_FAC_REQP           facility_req;
-		_FAC_CONP           facility_con;
-		_FAC_INDP           facility_ind;
-		_FAC_RESP           facility_res;
-		_CON_B3_REQP        connect_b3_req;
-		_CON_B3_CONP        connect_b3_con;
-		_CON_B3_INDP        connect_b3_ind;
-		_CON_B3_RESP        connect_b3_res;
-		_CON_B3_A_INDP      connect_b3_a_ind;
-		_CON_B3_A_RESP      connect_b3_a_res;
-		_DIS_B3_REQP        disconnect_b3_req;
-		_DIS_B3_CONP        disconnect_b3_con;
-		_DIS_B3_INDP        disconnect_b3_ind;
-		_DIS_B3_RESP        disconnect_b3_res;
-		_DAT_B3_REQP        data_b3_req;
-		_DAT_B3_REQ64P      data_b3_req64;
-		_DAT_B3_CONP        data_b3_con;
-		_DAT_B3_INDP        data_b3_ind;
-		_DAT_B3_IND64P      data_b3_ind64;
-		_DAT_B3_RESP        data_b3_res;
-		_RES_B3_REQP        reset_b3_req;
-		_RES_B3_CONP        reset_b3_con;
-		_RES_B3_INDP        reset_b3_ind;
-		_RES_B3_RESP        reset_b3_res;
-		_CON_B3_T90_A_INDP  connect_b3_t90_a_ind;
-		_CON_B3_T90_A_RESP  connect_b3_t90_a_res;
-		byte                b[200];
-	} info;
-};
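A minimal sketch (not from the original driver) of filling in the message
header defined above; real callers such as sendf() in capifunc.c go through
PUT_WORD()/PUT_DWORD() style accessors to stay endian-safe, which is omitted
here:

	/* Illustrative sketch: skeleton of an ALERT_REQ header.  The total
	 * message length must still account for the parameter part. */
	static void build_alert_req(CAPI_MSG *msg, word appl_id, word number,
				    byte controller, byte plci)
	{
		msg->header.length     = sizeof(msg->header);
		msg->header.appl_id    = appl_id;
		msg->header.command    = _ALERT_R;
		msg->header.number     = number;
		msg->header.controller = controller;
		msg->header.plci       = plci;
		msg->header.ncci       = 0;
	}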
-/*------------------------------------------------------------------*/
-/* non-fatal errors                                                 */
-/*------------------------------------------------------------------*/
-#define _NCPI_IGNORED           0x0001
-#define _FLAGS_IGNORED          0x0002
-#define _ALERT_IGNORED          0x0003
-/*------------------------------------------------------------------*/
-/* API function error codes                                         */
-/*------------------------------------------------------------------*/
-#define GOOD                            0x0000
-#define _TOO_MANY_APPLICATIONS          0x1001
-#define _BLOCK_TOO_SMALL                0x1002
-#define _BUFFER_TOO_BIG                 0x1003
-#define _MSG_BUFFER_TOO_SMALL           0x1004
-#define _TOO_MANY_CONNECTIONS           0x1005
-#define _REG_CAPI_BUSY                  0x1007
-#define _REG_RESOURCE_ERROR             0x1008
-#define _REG_CAPI_NOT_INSTALLED         0x1009
-#define _WRONG_APPL_ID                  0x1101
-#define _BAD_MSG                        0x1102
-#define _QUEUE_FULL                     0x1103
-#define _GET_NO_MSG                     0x1104
-#define _MSG_LOST                       0x1105
-#define _WRONG_NOTIFY                   0x1106
-#define _CAPI_BUSY                      0x1107
-#define _RESOURCE_ERROR                 0x1108
-#define _CAPI_NOT_INSTALLED             0x1109
-#define _NO_EXTERNAL_EQUIPMENT          0x110a
-#define _ONLY_EXTERNAL_EQUIPMENT        0x110b
-/*------------------------------------------------------------------*/
-/* addressing/coding error codes                                    */
-/*------------------------------------------------------------------*/
-#define _WRONG_STATE                    0x2001
-#define _WRONG_IDENTIFIER               0x2002
-#define _OUT_OF_PLCI                    0x2003
-#define _OUT_OF_NCCI                    0x2004
-#define _OUT_OF_LISTEN                  0x2005
-#define _OUT_OF_FAX                     0x2006
-#define _WRONG_MESSAGE_FORMAT           0x2007
-#define _OUT_OF_INTERCONNECT_RESOURCES  0x2008
-/*------------------------------------------------------------------*/
-/* configuration error codes                                        */
-/*------------------------------------------------------------------*/
-#define _B1_NOT_SUPPORTED                    0x3001
-#define _B2_NOT_SUPPORTED                    0x3002
-#define _B3_NOT_SUPPORTED                    0x3003
-#define _B1_PARM_NOT_SUPPORTED               0x3004
-#define _B2_PARM_NOT_SUPPORTED               0x3005
-#define _B3_PARM_NOT_SUPPORTED               0x3006
-#define _B_STACK_NOT_SUPPORTED               0x3007
-#define _NCPI_NOT_SUPPORTED                  0x3008
-#define _CIP_NOT_SUPPORTED                   0x3009
-#define _FLAGS_NOT_SUPPORTED                 0x300a
-#define _FACILITY_NOT_SUPPORTED              0x300b
-#define _DATA_LEN_NOT_SUPPORTED              0x300c
-#define _RESET_NOT_SUPPORTED                 0x300d
-#define _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED 0x300e
-#define _REQUEST_NOT_ALLOWED_IN_THIS_STATE   0x3010
-#define _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP 0x3011
-/*------------------------------------------------------------------*/
-/* reason codes                                                     */
-/*------------------------------------------------------------------*/
-#define _L1_ERROR                       0x3301
-#define _L2_ERROR                       0x3302
-#define _L3_ERROR                       0x3303
-#define _OTHER_APPL_CONNECTED           0x3304
-#define _CAPI_GUARD_ERROR               0x3305
-#define _L3_CAUSE                       0x3400
-/*------------------------------------------------------------------*/
-/* b3 reason codes                                                  */
-/*------------------------------------------------------------------*/
-#define _B_CHANNEL_LOST                 0x3301
-#define _B2_ERROR                       0x3302
-#define _B3_ERROR                       0x3303
-/*------------------------------------------------------------------*/
-/* fax error codes                                                  */
-/*------------------------------------------------------------------*/
-#define _FAX_NO_CONNECTION              0x3311
-#define _FAX_TRAINING_ERROR             0x3312
-#define _FAX_REMOTE_REJECT              0x3313
-#define _FAX_REMOTE_ABORT               0x3314
-#define _FAX_PROTOCOL_ERROR             0x3315
-#define _FAX_TX_UNDERRUN                0x3316
-#define _FAX_RX_OVERFLOW                0x3317
-#define _FAX_LOCAL_ABORT                0x3318
-#define _FAX_PARAMETER_ERROR            0x3319
-/*------------------------------------------------------------------*/
-/* line interconnect error codes                                    */
-/*------------------------------------------------------------------*/
-#define _LI_USER_INITIATED               0x0000
-#define _LI_LINE_NO_LONGER_AVAILABLE     0x3805
-#define _LI_INTERCONNECT_NOT_ESTABLISHED 0x3806
-#define _LI_LINES_NOT_COMPATIBLE         0x3807
-#define _LI2_USER_INITIATED              0x0000
-#define _LI2_PLCI_HAS_NO_BCHANNEL        0x3800
-#define _LI2_LINES_NOT_COMPATIBLE        0x3801
-#define _LI2_NOT_IN_SAME_INTERCONNECTION 0x3802
-/*------------------------------------------------------------------*/
-/* global options                                                   */
-/*------------------------------------------------------------------*/
-#define GL_INTERNAL_CONTROLLER_SUPPORTED     0x00000001L
-#define GL_EXTERNAL_EQUIPMENT_SUPPORTED      0x00000002L
-#define GL_HANDSET_SUPPORTED                 0x00000004L
-#define GL_DTMF_SUPPORTED                    0x00000008L
-#define GL_SUPPLEMENTARY_SERVICES_SUPPORTED  0x00000010L
-#define GL_CHANNEL_ALLOCATION_SUPPORTED      0x00000020L
-#define GL_BCHANNEL_OPERATION_SUPPORTED      0x00000040L
-#define GL_LINE_INTERCONNECT_SUPPORTED       0x00000080L
-#define GL_ECHO_CANCELLER_SUPPORTED          0x00000100L
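The GL_* bits above describe controller capabilities reported in
API_PROFILE.Global_Options (see CAPI20_CTL_GET_PROFILE earlier in this
header).  A minimal sketch of testing one of them:

	/* Illustrative sketch only; uses just names defined in this header. */
	static int profile_supports_dtmf(const API_PROFILE *p)
	{
		return (p->Global_Options & GL_DTMF_SUPPORTED) != 0;
	}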
-/*------------------------------------------------------------------*/
-/* protocol selection                                               */
-/*------------------------------------------------------------------*/
-#define B1_HDLC                 0
-#define B1_TRANSPARENT          1
-#define B1_V110_ASYNC           2
-#define B1_V110_SYNC            3
-#define B1_T30                  4
-#define B1_HDLC_INVERTED        5
-#define B1_TRANSPARENT_R        6
-#define B1_MODEM_ALL_NEGOTIATE  7
-#define B1_MODEM_ASYNC          8
-#define B1_MODEM_SYNC_HDLC      9
-#define B2_X75                  0
-#define B2_TRANSPARENT          1
-#define B2_SDLC                 2
-#define B2_LAPD                 3
-#define B2_T30                  4
-#define B2_PPP                  5
-#define B2_TRANSPARENT_NO_CRC   6
-#define B2_MODEM_EC_COMPRESSION 7
-#define B2_X75_V42BIS           8
-#define B2_V120_ASYNC           9
-#define B2_V120_ASYNC_V42BIS    10
-#define B2_V120_BIT_TRANSPARENT 11
-#define B2_LAPD_FREE_SAPI_SEL   12
-#define B3_TRANSPARENT          0
-#define B3_T90NL                1
-#define B3_ISO8208              2
-#define B3_X25_DCE              3
-#define B3_T30                  4
-#define B3_T30_WITH_EXTENSIONS  5
-#define B3_RESERVED             6
-#define B3_MODEM                7
-/*------------------------------------------------------------------*/
-/*  facility definitions                                            */
-/*------------------------------------------------------------------*/
-#define SELECTOR_HANDSET            0
-#define SELECTOR_DTMF               1
-#define SELECTOR_V42BIS             2
-#define SELECTOR_SU_SERV            3
-#define SELECTOR_POWER_MANAGEMENT   4
-#define SELECTOR_LINE_INTERCONNECT  5
-#define SELECTOR_ECHO_CANCELLER     6
-/*------------------------------------------------------------------*/
-/*  supplementary services definitions                              */
-/*------------------------------------------------------------------*/
-#define S_GET_SUPPORTED_SERVICES  0x0000
-#define S_LISTEN                  0x0001
-#define S_HOLD                    0x0002
-#define S_RETRIEVE                0x0003
-#define S_SUSPEND                 0x0004
-#define S_RESUME                  0x0005
-#define S_ECT                     0x0006
-#define S_3PTY_BEGIN              0x0007
-#define S_3PTY_END                0x0008
-#define S_CALL_DEFLECTION         0x000d
-#define S_CALL_FORWARDING_START   0x0009
-#define S_CALL_FORWARDING_STOP    0x000a
-#define S_INTERROGATE_DIVERSION   0x000b /* or interrogate parameters */
-#define S_INTERROGATE_NUMBERS     0x000c
-#define S_CCBS_REQUEST            0x000f
-#define S_CCBS_DEACTIVATE         0x0010
-#define S_CCBS_INTERROGATE        0x0011
-#define S_CCBS_CALL               0x0012
-#define S_MWI_ACTIVATE            0x0013
-#define S_MWI_DEACTIVATE          0x0014
-#define S_CONF_BEGIN           0x0017
-#define S_CONF_ADD                0x0018
-#define S_CONF_SPLIT           0x0019
-#define S_CONF_DROP               0x001a
-#define S_CONF_ISOLATE           0x001b
-#define S_CONF_REATTACH           0x001c
-#define S_CCBS_ERASECALLLINKAGEID 0x800d
-#define S_CCBS_STOP_ALERTING      0x8012
-#define S_CCBS_INFO_RETAIN        0x8013
-#define S_MWI_INDICATE            0x8014
-#define S_CONF_PARTYDISC          0x8016
-#define S_CONF_NOTIFICATION       0x8017
-/* Service Masks */
-#define MASK_HOLD_RETRIEVE        0x00000001
-#define MASK_TERMINAL_PORTABILITY 0x00000002
-#define MASK_ECT                  0x00000004
-#define MASK_3PTY                 0x00000008
-#define MASK_CALL_FORWARDING      0x00000010
-#define MASK_CALL_DEFLECTION      0x00000020
-#define MASK_MWI                  0x00000100
-#define MASK_CCNR                 0x00000200
-#define MASK_CONF                 0x00000400
-/*------------------------------------------------------------------*/
-/*  dtmf definitions                                                */
-/*------------------------------------------------------------------*/
-#define DTMF_LISTEN_START     1
-#define DTMF_LISTEN_STOP      2
-#define DTMF_DIGITS_SEND      3
-#define DTMF_SUCCESS          0
-#define DTMF_INCORRECT_DIGIT  1
-#define DTMF_UNKNOWN_REQUEST  2
-/*------------------------------------------------------------------*/
-/*  line interconnect definitions                                   */
-/*------------------------------------------------------------------*/
-#define LI_GET_SUPPORTED_SERVICES       0
-#define LI_REQ_CONNECT                  1
-#define LI_REQ_DISCONNECT               2
-#define LI_IND_CONNECT_ACTIVE           1
-#define LI_IND_DISCONNECT               2
-#define LI_FLAG_CONFERENCE_A_B          ((dword) 0x00000001L)
-#define LI_FLAG_CONFERENCE_B_A          ((dword) 0x00000002L)
-#define LI_FLAG_MONITOR_A               ((dword) 0x00000004L)
-#define LI_FLAG_MONITOR_B               ((dword) 0x00000008L)
-#define LI_FLAG_ANNOUNCEMENT_A          ((dword) 0x00000010L)
-#define LI_FLAG_ANNOUNCEMENT_B          ((dword) 0x00000020L)
-#define LI_FLAG_MIX_A                   ((dword) 0x00000040L)
-#define LI_FLAG_MIX_B                   ((dword) 0x00000080L)
-#define LI_CONFERENCING_SUPPORTED       ((dword) 0x00000001L)
-#define LI_MONITORING_SUPPORTED         ((dword) 0x00000002L)
-#define LI_ANNOUNCEMENTS_SUPPORTED      ((dword) 0x00000004L)
-#define LI_MIXING_SUPPORTED             ((dword) 0x00000008L)
-#define LI_CROSS_CONTROLLER_SUPPORTED   ((dword) 0x00000010L)
-#define LI2_GET_SUPPORTED_SERVICES      0
-#define LI2_REQ_CONNECT                 1
-#define LI2_REQ_DISCONNECT              2
-#define LI2_IND_CONNECT_ACTIVE          1
-#define LI2_IND_DISCONNECT              2
-#define LI2_FLAG_INTERCONNECT_A_B       ((dword) 0x00000001L)
-#define LI2_FLAG_INTERCONNECT_B_A       ((dword) 0x00000002L)
-#define LI2_FLAG_MONITOR_B              ((dword) 0x00000004L)
-#define LI2_FLAG_MIX_B                  ((dword) 0x00000008L)
-#define LI2_FLAG_MONITOR_X              ((dword) 0x00000010L)
-#define LI2_FLAG_MIX_X                  ((dword) 0x00000020L)
-#define LI2_FLAG_LOOP_B                 ((dword) 0x00000040L)
-#define LI2_FLAG_LOOP_PC                ((dword) 0x00000080L)
-#define LI2_FLAG_LOOP_X                 ((dword) 0x00000100L)
-#define LI2_CROSS_CONTROLLER_SUPPORTED  ((dword) 0x00000001L)
-#define LI2_ASYMMETRIC_SUPPORTED        ((dword) 0x00000002L)
-#define LI2_MONITORING_SUPPORTED        ((dword) 0x00000004L)
-#define LI2_MIXING_SUPPORTED            ((dword) 0x00000008L)
-#define LI2_REMOTE_MONITORING_SUPPORTED ((dword) 0x00000010L)
-#define LI2_REMOTE_MIXING_SUPPORTED     ((dword) 0x00000020L)
-#define LI2_B_LOOPING_SUPPORTED         ((dword) 0x00000040L)
-#define LI2_PC_LOOPING_SUPPORTED        ((dword) 0x00000080L)
-#define LI2_X_LOOPING_SUPPORTED         ((dword) 0x00000100L)
-/*------------------------------------------------------------------*/
-/* echo canceller definitions                                       */
-/*------------------------------------------------------------------*/
-#define EC_GET_SUPPORTED_SERVICES            0
-#define EC_ENABLE_OPERATION                  1
-#define EC_DISABLE_OPERATION                 2
-#define EC_ENABLE_NON_LINEAR_PROCESSING      0x0001
-#define EC_DO_NOT_REQUIRE_REVERSALS          0x0002
-#define EC_DETECT_DISABLE_TONE               0x0004
-#define EC_ENABLE_ADAPTIVE_PREDELAY          0x0008
-#define EC_NON_LINEAR_PROCESSING_SUPPORTED   0x0001
-#define EC_BYPASS_ON_ANY_2100HZ_SUPPORTED    0x0002
-#define EC_BYPASS_ON_REV_2100HZ_SUPPORTED    0x0004
-#define EC_ADAPTIVE_PREDELAY_SUPPORTED       0x0008
-#define EC_BYPASS_INDICATION                 1
-#define EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ   1
-#define EC_BYPASS_DUE_TO_REVERSED_2100HZ     2
-#define EC_BYPASS_RELEASED                   3
-/*------------------------------------------------------------------*/
-/* function prototypes                                              */
-/*------------------------------------------------------------------*/
-/*------------------------------------------------------------------*/
-#endif /* _INC_CAPI20 */
diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
deleted file mode 100644
index e3f7784..0000000
--- a/drivers/isdn/hardware/eicon/capidtmf.c
+++ /dev/null
@@ -1,685 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include "platform.h"
-
-
-
-
-
-
-
-
-
-#include "capidtmf.h"
-
-/* #define TRACE_ */
-
-#define FILE_ "CAPIDTMF.C"
-
-/*---------------------------------------------------------------------------*/
-
-
-#define trace(a)
-
-
-
-/*---------------------------------------------------------------------------*/
-
-static short capidtmf_expand_table_alaw[0x0100] =
-{
-	-5504,   5504,   -344,    344, -22016,  22016,  -1376,   1376,
-	-2752,   2752,    -88,     88, -11008,  11008,   -688,    688,
-	-7552,   7552,   -472,    472, -30208,  30208,  -1888,   1888,
-	-3776,   3776,   -216,    216, -15104,  15104,   -944,    944,
-	-4480,   4480,   -280,    280, -17920,  17920,  -1120,   1120,
-	-2240,   2240,    -24,     24,  -8960,   8960,   -560,    560,
-	-6528,   6528,   -408,    408, -26112,  26112,  -1632,   1632,
-	-3264,   3264,   -152,    152, -13056,  13056,   -816,    816,
-	-6016,   6016,   -376,    376, -24064,  24064,  -1504,   1504,
-	-3008,   3008,   -120,    120, -12032,  12032,   -752,    752,
-	-8064,   8064,   -504,    504, -32256,  32256,  -2016,   2016,
-	-4032,   4032,   -248,    248, -16128,  16128,  -1008,   1008,
-	-4992,   4992,   -312,    312, -19968,  19968,  -1248,   1248,
-	-2496,   2496,    -56,     56,  -9984,   9984,   -624,    624,
-	-7040,   7040,   -440,    440, -28160,  28160,  -1760,   1760,
-	-3520,   3520,   -184,    184, -14080,  14080,   -880,    880,
-	-5248,   5248,   -328,    328, -20992,  20992,  -1312,   1312,
-	-2624,   2624,    -72,     72, -10496,  10496,   -656,    656,
-	-7296,   7296,   -456,    456, -29184,  29184,  -1824,   1824,
-	-3648,   3648,   -200,    200, -14592,  14592,   -912,    912,
-	-4224,   4224,   -264,    264, -16896,  16896,  -1056,   1056,
-	-2112,   2112,     -8,      8,  -8448,   8448,   -528,    528,
-	-6272,   6272,   -392,    392, -25088,  25088,  -1568,   1568,
-	-3136,   3136,   -136,    136, -12544,  12544,   -784,    784,
-	-5760,   5760,   -360,    360, -23040,  23040,  -1440,   1440,
-	-2880,   2880,   -104,    104, -11520,  11520,   -720,    720,
-	-7808,   7808,   -488,    488, -31232,  31232,  -1952,   1952,
-	-3904,   3904,   -232,    232, -15616,  15616,   -976,    976,
-	-4736,   4736,   -296,    296, -18944,  18944,  -1184,   1184,
-	-2368,   2368,    -40,     40,  -9472,   9472,   -592,    592,
-	-6784,   6784,   -424,    424, -27136,  27136,  -1696,   1696,
-	-3392,   3392,   -168,    168, -13568,  13568,   -848,    848
-};
-
-static short capidtmf_expand_table_ulaw[0x0100] =
-{
-	-32124,  32124,  -1884,   1884,  -7932,   7932,   -372,    372,
-	-15996,  15996,   -876,    876,  -3900,   3900,   -120,    120,
-	-23932,  23932,  -1372,   1372,  -5884,   5884,   -244,    244,
-	-11900,  11900,   -620,    620,  -2876,   2876,    -56,     56,
-	-28028,  28028,  -1628,   1628,  -6908,   6908,   -308,    308,
-	-13948,  13948,   -748,    748,  -3388,   3388,    -88,     88,
-	-19836,  19836,  -1116,   1116,  -4860,   4860,   -180,    180,
-	-9852,   9852,   -492,    492,  -2364,   2364,    -24,     24,
-	-30076,  30076,  -1756,   1756,  -7420,   7420,   -340,    340,
-	-14972,  14972,   -812,    812,  -3644,   3644,   -104,    104,
-	-21884,  21884,  -1244,   1244,  -5372,   5372,   -212,    212,
-	-10876,  10876,   -556,    556,  -2620,   2620,    -40,     40,
-	-25980,  25980,  -1500,   1500,  -6396,   6396,   -276,    276,
-	-12924,  12924,   -684,    684,  -3132,   3132,    -72,     72,
-	-17788,  17788,   -988,    988,  -4348,   4348,   -148,    148,
-	-8828,   8828,   -428,    428,  -2108,   2108,     -8,      8,
-	-31100,  31100,  -1820,   1820,  -7676,   7676,   -356,    356,
-	-15484,  15484,   -844,    844,  -3772,   3772,   -112,    112,
-	-22908,  22908,  -1308,   1308,  -5628,   5628,   -228,    228,
-	-11388,  11388,   -588,    588,  -2748,   2748,    -48,     48,
-	-27004,  27004,  -1564,   1564,  -6652,   6652,   -292,    292,
-	-13436,  13436,   -716,    716,  -3260,   3260,    -80,     80,
-	-18812,  18812,  -1052,   1052,  -4604,   4604,   -164,    164,
-	-9340,   9340,   -460,    460,  -2236,   2236,    -16,     16,
-	-29052,  29052,  -1692,   1692,  -7164,   7164,   -324,    324,
-	-14460,  14460,   -780,    780,  -3516,   3516,    -96,     96,
-	-20860,  20860,  -1180,   1180,  -5116,   5116,   -196,    196,
-	-10364,  10364,   -524,    524,  -2492,   2492,    -32,     32,
-	-24956,  24956,  -1436,   1436,  -6140,   6140,   -260,    260,
-	-12412,  12412,   -652,    652,  -3004,   3004,    -64,     64,
-	-16764,  16764,   -924,    924,  -4092,   4092,   -132,    132,
-	-8316,   8316,   -396,    396,  -1980,   1980,      0,      0
-};
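Both tables map one companded 8-bit sample directly to a 16-bit linear value.
A minimal sketch of how they are consumed (the windowing applied in
capidtmf_recv_block() below is left out):

	/* Illustrative sketch, not part of the original driver. */
	static void expand_block(const byte *in, short *out, word count, byte ulaw)
	{
		const short *table = ulaw ? capidtmf_expand_table_ulaw
					  : capidtmf_expand_table_alaw;
		word i;

		for (i = 0; i < count; i++)
			out[i] = table[in[i]];
	}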
-
-
-/*---------------------------------------------------------------------------*/
-
-static short capidtmf_recv_window_function[CAPIDTMF_RECV_ACCUMULATE_CYCLES] =
-{
-	-500L,   -999L,  -1499L,  -1998L,  -2496L,  -2994L,  -3491L,  -3988L,
-	-4483L,  -4978L,  -5471L,  -5963L,  -6454L,  -6943L,  -7431L,  -7917L,
-	-8401L,  -8883L,  -9363L,  -9840L, -10316L, -10789L, -11259L, -11727L,
-	-12193L, -12655L, -13115L, -13571L, -14024L, -14474L, -14921L, -15364L,
-	-15804L, -16240L, -16672L, -17100L, -17524L, -17944L, -18360L, -18772L,
-	-19180L, -19583L, -19981L, -20375L, -20764L, -21148L, -21527L, -21901L,
-	-22270L, -22634L, -22993L, -23346L, -23694L, -24037L, -24374L, -24705L,
-	-25030L, -25350L, -25664L, -25971L, -26273L, -26568L, -26858L, -27141L,
-	-27418L, -27688L, -27952L, -28210L, -28461L, -28705L, -28943L, -29174L,
-	-29398L, -29615L, -29826L, -30029L, -30226L, -30415L, -30598L, -30773L,
-	-30941L, -31102L, -31256L, -31402L, -31541L, -31673L, -31797L, -31914L,
-	-32024L, -32126L, -32221L, -32308L, -32388L, -32460L, -32524L, -32581L,
-	-32631L, -32673L, -32707L, -32734L, -32753L, -32764L, -32768L, -32764L,
-	-32753L, -32734L, -32707L, -32673L, -32631L, -32581L, -32524L, -32460L,
-	-32388L, -32308L, -32221L, -32126L, -32024L, -31914L, -31797L, -31673L,
-	-31541L, -31402L, -31256L, -31102L, -30941L, -30773L, -30598L, -30415L,
-	-30226L, -30029L, -29826L, -29615L, -29398L, -29174L, -28943L, -28705L,
-	-28461L, -28210L, -27952L, -27688L, -27418L, -27141L, -26858L, -26568L,
-	-26273L, -25971L, -25664L, -25350L, -25030L, -24705L, -24374L, -24037L,
-	-23694L, -23346L, -22993L, -22634L, -22270L, -21901L, -21527L, -21148L,
-	-20764L, -20375L, -19981L, -19583L, -19180L, -18772L, -18360L, -17944L,
-	-17524L, -17100L, -16672L, -16240L, -15804L, -15364L, -14921L, -14474L,
-	-14024L, -13571L, -13115L, -12655L, -12193L, -11727L, -11259L, -10789L,
-	-10316L,  -9840L,  -9363L,  -8883L,  -8401L,  -7917L,  -7431L,  -6943L,
-	-6454L,  -5963L,  -5471L,  -4978L,  -4483L,  -3988L,  -3491L,  -2994L,
-	-2496L,  -1998L,  -1499L,   -999L,   -500L,
-};
-
-static byte capidtmf_leading_zeroes_table[0x100] =
-{
-	8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
-	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-#define capidtmf_byte_leading_zeroes(b)  (capidtmf_leading_zeroes_table[(BYTE)(b)])
-#define capidtmf_word_leading_zeroes(w)  (((w) & 0xff00) ? capidtmf_leading_zeroes_table[(w) >> 8] : 8 + capidtmf_leading_zeroes_table[(w)])
-#define capidtmf_dword_leading_zeroes(d)  (((d) & 0xffff0000L) ?    (((d) & 0xff000000L) ? capidtmf_leading_zeroes_table[(d) >> 24] : 8 + capidtmf_leading_zeroes_table[(d) >> 16]) :    (((d) & 0xff00) ? 16 + capidtmf_leading_zeroes_table[(d) >> 8] : 24 + capidtmf_leading_zeroes_table[(d)]))
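The table above simply holds the number of leading zero bits of every 8-bit
value (8 for zero), which the three macros extend to 16- and 32-bit operands.
A minimal sketch that would regenerate it:

	/* Illustrative sketch: rebuild capidtmf_leading_zeroes_table. */
	static void build_leading_zeroes_table(byte *table)
	{
		unsigned int b;

		for (b = 0; b < 0x100; b++) {
			unsigned int v = b;
			byte n = 8;

			while (v) {
				v >>= 1;
				n--;
			}
			table[b] = n;
		}
	}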
-
-
-/*---------------------------------------------------------------------------*/
-
-
-static void capidtmf_goertzel_loop(long *buffer, long *coeffs, short *sample, long count)
-{
-	int i, j;
-	long c, d, q0, q1, q2;
-
-	for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - 1; i++)
-	{
-		q1 = buffer[i];
-		q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
-		d = coeffs[i] >> 1;
-		c = d << 1;
-		if (c >= 0)
-		{
-			for (j = 0; j < count; j++)
-			{
-				q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15);
-				q2 = q1;
-				q1 = q0;
-			}
-		}
-		else
-		{
-			c = -c;
-			d = -d;
-			for (j = 0; j < count; j++)
-			{
-				q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15));
-				q2 = q1;
-				q1 = q0;
-			}
-		}
-		buffer[i] = q1;
-		buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = q2;
-	}
-	q1 = buffer[i];
-	q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
-	c = (coeffs[i] >> 1) << 1;
-	if (c >= 0)
-	{
-		for (j = 0; j < count; j++)
-		{
-			q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15);
-			q2 = q1;
-			q1 = q0;
-			c -= CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT;
-		}
-	}
-	else
-	{
-		c = -c;
-		for (j = 0; j < count; j++)
-		{
-			q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15));
-			q2 = q1;
-			q1 = q0;
-			c += CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT;
-		}
-	}
-	coeffs[i] = c;
-	buffer[i] = q1;
-	buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = q2;
-}
-
-
-static void capidtmf_goertzel_result(long *buffer, long *coeffs)
-{
-	int i;
-	long d, e, q1, q2, lo, mid, hi;
-	dword k;
-
-	for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
-	{
-		q1 = buffer[i];
-		q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
-		d = coeffs[i] >> 1;
-		if (d >= 0)
-			d = ((d << 1) * (-q1 >> 16)) + (((dword)(((dword) d) * ((dword)(-q1 & 0xffff)))) >> 15);
-		else
-			d = ((-d << 1) * (-q1 >> 16)) + (((dword)(((dword) -d) * ((dword)(-q1 & 0xffff)))) >> 15);
-		e = (q2 >= 0) ? q2 : -q2;
-		if (d >= 0)
-		{
-			k = ((dword)(d & 0xffff)) * ((dword)(e & 0xffff));
-			lo = k & 0xffff;
-			mid = k >> 16;
-			k = ((dword)(d >> 16)) * ((dword)(e & 0xffff));
-			mid += k & 0xffff;
-			hi = k >> 16;
-			k = ((dword)(d & 0xffff)) * ((dword)(e >> 16));
-			mid += k & 0xffff;
-			hi += k >> 16;
-			hi += ((dword)(d >> 16)) * ((dword)(e >> 16));
-		}
-		else
-		{
-			d = -d;
-			k = ((dword)(d & 0xffff)) * ((dword)(e & 0xffff));
-			lo = -((long)(k & 0xffff));
-			mid = -((long)(k >> 16));
-			k = ((dword)(d >> 16)) * ((dword)(e & 0xffff));
-			mid -= k & 0xffff;
-			hi = -((long)(k >> 16));
-			k = ((dword)(d & 0xffff)) * ((dword)(e >> 16));
-			mid -= k & 0xffff;
-			hi -= k >> 16;
-			hi -= ((dword)(d >> 16)) * ((dword)(e >> 16));
-		}
-		if (q2 < 0)
-		{
-			lo = -lo;
-			mid = -mid;
-			hi = -hi;
-		}
-		d = (q1 >= 0) ? q1 : -q1;
-		k = ((dword)(d & 0xffff)) * ((dword)(d & 0xffff));
-		lo += k & 0xffff;
-		mid += k >> 16;
-		k = ((dword)(d >> 16)) * ((dword)(d & 0xffff));
-		mid += (k & 0xffff) << 1;
-		hi += (k >> 16) << 1;
-		hi += ((dword)(d >> 16)) * ((dword)(d >> 16));
-		d = (q2 >= 0) ? q2 : -q2;
-		k = ((dword)(d & 0xffff)) * ((dword)(d & 0xffff));
-		lo += k & 0xffff;
-		mid += k >> 16;
-		k = ((dword)(d >> 16)) * ((dword)(d & 0xffff));
-		mid += (k & 0xffff) << 1;
-		hi += (k >> 16) << 1;
-		hi += ((dword)(d >> 16)) * ((dword)(d >> 16));
-		mid += lo >> 16;
-		hi += mid >> 16;
-		buffer[i] = (lo & 0xffff) | (mid << 16);
-		buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = hi;
-	}
-}
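capidtmf_goertzel_loop() and capidtmf_goertzel_result() together form a
fixed-point Goertzel filter bank: for each frequency the loop runs the
recurrence q0 = x[n] + c*q1 - q2 with c approximately 2*cos(2*pi*f/fs), and
the result step forms the squared magnitude q1*q1 + q2*q2 - c*q1*q2 as a
48-bit value split over the two halves of the buffer.  A floating-point
reference (illustrative only, ignoring the Q15/Q16 scaling used above):

	/* Illustrative sketch: Goertzel power of one tone in a sample block.
	 * coeff = 2.0 * cos(2.0 * M_PI * freq / sample_rate), supplied by
	 * the caller. */
	static double goertzel_power(const short *sample, int count, double coeff)
	{
		double q0, q1 = 0.0, q2 = 0.0;
		int n;

		for (n = 0; n < count; n++) {
			q0 = (double)sample[n] + coeff * q1 - q2;
			q2 = q1;
			q1 = q0;
		}
		return q1 * q1 + q2 * q2 - coeff * q1 * q2;
	}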
-
-
-/*---------------------------------------------------------------------------*/
-
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_697     0
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_770     1
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_852     2
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_941     3
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1209    4
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1336    5
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1477    6
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1633    7
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_635     8
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1010    9
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1140    10
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1272    11
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1405    12
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1555    13
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1715    14
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1875    15
-
-#define CAPIDTMF_RECV_GUARD_SNR_DONTCARE      0xc000
-#define CAPIDTMF_RECV_NO_DIGIT                0xff
-#define CAPIDTMF_RECV_TIME_GRANULARITY        (CAPIDTMF_RECV_ACCUMULATE_CYCLES + 1)
-
-#define CAPIDTMF_RECV_INDICATION_DIGIT        0x0001
-
-static long capidtmf_recv_goertzel_coef_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
-	0xda97L * 2,  /* 697 Hz (Low group 697 Hz) */
-	0xd299L * 2,  /* 770 Hz (Low group 770 Hz) */
-	0xc8cbL * 2,  /* 852 Hz (Low group 852 Hz) */
-	0xbd36L * 2,  /* 941 Hz (Low group 941 Hz) */
-	0x9501L * 2,  /* 1209 Hz (High group 1209 Hz) */
-	0x7f89L * 2,  /* 1336 Hz (High group 1336 Hz) */
-	0x6639L * 2,  /* 1477 Hz (High group 1477 Hz) */
-	0x48c6L * 2,  /* 1633 Hz (High group 1633 Hz) */
-	0xe14cL * 2,  /* 630 Hz (Lower guard of low group 631 Hz) */
-	0xb2e0L * 2,  /* 1015 Hz (Upper guard of low group 1039 Hz) */
-	0xa1a0L * 2,  /* 1130 Hz (Lower guard of high group 1140 Hz) */
-	0x8a87L * 2,  /* 1272 Hz (Guard between 1209 Hz and 1336 Hz: 1271 Hz) */
-	0x7353L * 2,  /* 1405 Hz (2nd harmonics of 697 Hz and guard between 1336 Hz and 1477 Hz: 1405 Hz) */
-	0x583bL * 2,  /* 1552 Hz (2nd harmonics of 770 Hz and guard between 1477 Hz and 1633 Hz: 1553 Hz) */
-	0x37d8L * 2,  /* 1720 Hz (2nd harmonics of 852 Hz and upper guard of high group: 1715 Hz) */
-	0x0000L * 2   /* 100-630 Hz (fundamentals) */
-};
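The constants above appear to be 2*cos(2*pi*f/8000) in Q15, stored doubled
(hence the "* 2"); the shipped values differ from that formula by a few LSBs,
so take the following user-space sketch as an approximation of the derivation
rather than the exact generator (it assumes an 8 kHz sample rate and <math.h>,
so it is not kernel code):

	/* Illustrative sketch only. */
	static long approx_goertzel_coeff(double freq_hz)
	{
		return (long)(2.0 * cos(2.0 * M_PI * freq_hz / 8000.0) * 32768.0) * 2;
	}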
-
-
-static word capidtmf_recv_guard_snr_low_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
-	14,                                    /* Low group peak versus 697 Hz */
-	14,                                    /* Low group peak versus 770 Hz */
-	16,                                    /* Low group peak versus 852 Hz */
-	16,                                    /* Low group peak versus 941 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1209 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1336 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1477 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1633 Hz */
-	14,                                    /* Low group peak versus 635 Hz */
-	16,                                    /* Low group peak versus 1010 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1140 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* Low group peak versus 1272 Hz */
-	DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 8,  /* Low group peak versus 1405 Hz */
-	DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4,  /* Low group peak versus 1555 Hz */
-	DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4,  /* Low group peak versus 1715 Hz */
-	12                                     /* Low group peak versus 100-630 Hz */
-};
-
-
-static word capidtmf_recv_guard_snr_high_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 697 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 770 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 852 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 941 Hz */
-	20,                                    /* High group peak versus 1209 Hz */
-	20,                                    /* High group peak versus 1336 Hz */
-	20,                                    /* High group peak versus 1477 Hz */
-	20,                                    /* High group peak versus 1633 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 635 Hz */
-	CAPIDTMF_RECV_GUARD_SNR_DONTCARE,      /* High group peak versus 1010 Hz */
-	16,                                    /* High group peak versus 1140 Hz */
-	4,                                     /* High group peak versus 1272 Hz */
-	6,                                     /* High group peak versus 1405 Hz */
-	8,                                     /* High group peak versus 1555 Hz */
-	16,                                    /* High group peak versus 1715 Hz */
-	12                                     /* High group peak versus 100-630 Hz */
-};
-
-
-/*---------------------------------------------------------------------------*/
-
-static void capidtmf_recv_init(t_capidtmf_state *p_state)
-{
-	p_state->recv.min_gap_duration = 1;
-	p_state->recv.min_digit_duration = 1;
-
-	p_state->recv.cycle_counter = 0;
-	p_state->recv.current_digit_on_time = 0;
-	p_state->recv.current_digit_off_time = 0;
-	p_state->recv.current_digit_value = CAPIDTMF_RECV_NO_DIGIT;
-
-	p_state->recv.digit_write_pos = 0;
-	p_state->recv.digit_read_pos = 0;
-	p_state->recv.indication_state = 0;
-	p_state->recv.indication_state_ack = 0;
-	p_state->recv.state = CAPIDTMF_RECV_STATE_IDLE;
-}
-
-
-void capidtmf_recv_enable(t_capidtmf_state *p_state, word min_digit_duration, word min_gap_duration)
-{
-	p_state->recv.indication_state_ack &= CAPIDTMF_RECV_INDICATION_DIGIT;
-	p_state->recv.min_digit_duration = (word)(((((dword) min_digit_duration) * 8) +
-						   ((dword)(CAPIDTMF_RECV_TIME_GRANULARITY / 2))) / ((dword) CAPIDTMF_RECV_TIME_GRANULARITY));
-	if (p_state->recv.min_digit_duration <= 1)
-		p_state->recv.min_digit_duration = 1;
-	else
-		(p_state->recv.min_digit_duration)--;
-	p_state->recv.min_gap_duration =
-		(word)((((dword) min_gap_duration) * 8) / ((dword) CAPIDTMF_RECV_TIME_GRANULARITY));
-	if (p_state->recv.min_gap_duration <= 1)
-		p_state->recv.min_gap_duration = 1;
-	else
-		(p_state->recv.min_gap_duration)--;
-	p_state->recv.state |= CAPIDTMF_RECV_STATE_DTMF_ACTIVE;
-}
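capidtmf_recv_enable() converts the caller's thresholds (in milliseconds at
8 samples/ms) into accumulation periods of CAPIDTMF_RECV_TIME_GRANULARITY
samples, rounding the digit duration, truncating the gap duration, and then
decrementing values above 1.  A minimal sketch of the digit-duration branch,
with a worked example:

	/* Illustrative sketch: 40 ms -> (40 * 8 + 103) / 206 = 2 -> stored as 1. */
	static word ms_to_periods(word ms)
	{
		word periods = (word)((((dword) ms) * 8 + CAPIDTMF_RECV_TIME_GRANULARITY / 2)
				      / CAPIDTMF_RECV_TIME_GRANULARITY);

		return (periods <= 1) ? 1 : (word)(periods - 1);
	}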
-
-
-void capidtmf_recv_disable(t_capidtmf_state *p_state)
-{
-	p_state->recv.state &= ~CAPIDTMF_RECV_STATE_DTMF_ACTIVE;
-	if (p_state->recv.state == CAPIDTMF_RECV_STATE_IDLE)
-		capidtmf_recv_init(p_state);
-	else
-	{
-		p_state->recv.cycle_counter = 0;
-		p_state->recv.current_digit_on_time = 0;
-		p_state->recv.current_digit_off_time = 0;
-		p_state->recv.current_digit_value = CAPIDTMF_RECV_NO_DIGIT;
-	}
-}
-
-
-word capidtmf_recv_indication(t_capidtmf_state *p_state, byte *buffer)
-{
-	word i, j, k, flags;
-
-	flags = p_state->recv.indication_state ^ p_state->recv.indication_state_ack;
-	p_state->recv.indication_state_ack ^= flags & CAPIDTMF_RECV_INDICATION_DIGIT;
-	if (p_state->recv.digit_write_pos != p_state->recv.digit_read_pos)
-	{
-		i = 0;
-		k = p_state->recv.digit_write_pos;
-		j = p_state->recv.digit_read_pos;
-		do
-		{
-			buffer[i++] = p_state->recv.digit_buffer[j];
-			j = (j == CAPIDTMF_RECV_DIGIT_BUFFER_SIZE - 1) ? 0 : j + 1;
-		} while (j != k);
-		p_state->recv.digit_read_pos = k;
-		return (i);
-	}
-	p_state->recv.indication_state_ack ^= flags;
-	return (0);
-}
-
-
-#define CAPIDTMF_RECV_WINDOWED_SAMPLES  32
-
-void capidtmf_recv_block(t_capidtmf_state *p_state, byte *buffer, word length)
-{
-	byte result_digit;
-	word sample_number, cycle_counter, n, i;
-	word low_peak, high_peak;
-	dword lo, hi;
-	byte   *p;
-	short *q;
-	byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
-	short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
-
-
-	if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
-	{
-		cycle_counter = p_state->recv.cycle_counter;
-		sample_number = 0;
-		while (sample_number < length)
-		{
-			if (cycle_counter < CAPIDTMF_RECV_ACCUMULATE_CYCLES)
-			{
-				if (cycle_counter == 0)
-				{
-					for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
-					{
-						p_state->recv.goertzel_buffer[0][i] = 0;
-						p_state->recv.goertzel_buffer[1][i] = 0;
-					}
-				}
-				n = CAPIDTMF_RECV_ACCUMULATE_CYCLES - cycle_counter;
-				if (n > length - sample_number)
-					n = length - sample_number;
-				if (n > CAPIDTMF_RECV_WINDOWED_SAMPLES)
-					n = CAPIDTMF_RECV_WINDOWED_SAMPLES;
-				p = buffer + sample_number;
-				q = capidtmf_recv_window_function + cycle_counter;
-				if (p_state->ulaw)
-				{
-					for (i = 0; i < n; i++)
-					{
-						windowed_sample_buffer[i] =
-							(short)((capidtmf_expand_table_ulaw[p[i]] * ((long)(q[i]))) >> 15);
-					}
-				}
-				else
-				{
-					for (i = 0; i < n; i++)
-					{
-						windowed_sample_buffer[i] =
-							(short)((capidtmf_expand_table_alaw[p[i]] * ((long)(q[i]))) >> 15);
-					}
-				}
-				capidtmf_recv_goertzel_coef_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - 1] = CAPIDTMF_RECV_FUNDAMENTAL_OFFSET;
-				capidtmf_goertzel_loop(p_state->recv.goertzel_buffer[0],
-						       capidtmf_recv_goertzel_coef_table, windowed_sample_buffer, n);
-				cycle_counter += n;
-				sample_number += n;
-			}
-			else
-			{
-				capidtmf_goertzel_result(p_state->recv.goertzel_buffer[0],
-							 capidtmf_recv_goertzel_coef_table);
-				for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
-				{
-					lo = (dword)(p_state->recv.goertzel_buffer[0][i]);
-					hi = (dword)(p_state->recv.goertzel_buffer[1][i]);
-					if (hi != 0)
-					{
-						n = capidtmf_dword_leading_zeroes(hi);
-						hi = (hi << n) | (lo >> (32 - n));
-					}
-					else
-					{
-						n = capidtmf_dword_leading_zeroes(lo);
-						hi = lo << n;
-						n += 32;
-					}
-					n = 195 - 3 * n;
-					if (hi >= 0xcb300000L)
-						n += 2;
-					else if (hi >= 0xa1450000L)
-						n++;
-					goertzel_result_buffer[i] = (byte) n;
-				}
-				low_peak = DSPDTMF_RX_SENSITIVITY_LOW_DEFAULT;
-				result_digit = CAPIDTMF_RECV_NO_DIGIT;
-				for (i = 0; i < CAPIDTMF_LOW_GROUP_FREQUENCIES; i++)
-				{
-					if (goertzel_result_buffer[i] > low_peak)
-					{
-						low_peak = goertzel_result_buffer[i];
-						result_digit = (byte) i;
-					}
-				}
-				high_peak = DSPDTMF_RX_SENSITIVITY_HIGH_DEFAULT;
-				n = CAPIDTMF_RECV_NO_DIGIT;
-				for (i = CAPIDTMF_LOW_GROUP_FREQUENCIES; i < CAPIDTMF_RECV_BASE_FREQUENCY_COUNT; i++)
-				{
-					if (goertzel_result_buffer[i] > high_peak)
-					{
-						high_peak = goertzel_result_buffer[i];
-						n = (i - CAPIDTMF_LOW_GROUP_FREQUENCIES) << 2;
-					}
-				}
-				result_digit |= (byte) n;
-				if (low_peak + DSPDTMF_RX_HIGH_EXCEEDING_LOW_DEFAULT < high_peak)
-					result_digit = CAPIDTMF_RECV_NO_DIGIT;
-				if (high_peak + DSPDTMF_RX_LOW_EXCEEDING_HIGH_DEFAULT < low_peak)
-					result_digit = CAPIDTMF_RECV_NO_DIGIT;
-				n = 0;
-				for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
-				{
-					if ((((short)(low_peak - goertzel_result_buffer[i] - capidtmf_recv_guard_snr_low_table[i])) < 0)
-					    || (((short)(high_peak - goertzel_result_buffer[i] - capidtmf_recv_guard_snr_high_table[i])) < 0))
-					{
-						n++;
-					}
-				}
-				if (n != 2)
-					result_digit = CAPIDTMF_RECV_NO_DIGIT;
-
-				if (result_digit == CAPIDTMF_RECV_NO_DIGIT)
-				{
-					if (p_state->recv.current_digit_on_time != 0)
-					{
-						if (++(p_state->recv.current_digit_off_time) >= p_state->recv.min_gap_duration)
-						{
-							p_state->recv.current_digit_on_time = 0;
-							p_state->recv.current_digit_off_time = 0;
-						}
-					}
-					else
-					{
-						if (p_state->recv.current_digit_off_time != 0)
-							(p_state->recv.current_digit_off_time)--;
-					}
-				}
-				else
-				{
-					if ((p_state->recv.current_digit_on_time == 0)
-					    && (p_state->recv.current_digit_off_time != 0))
-					{
-						(p_state->recv.current_digit_off_time)--;
-					}
-					else
-					{
-						n = p_state->recv.current_digit_off_time;
-						if ((p_state->recv.current_digit_on_time != 0)
-						    && (result_digit != p_state->recv.current_digit_value))
-						{
-							p_state->recv.current_digit_on_time = 0;
-							n = 0;
-						}
-						p_state->recv.current_digit_value = result_digit;
-						p_state->recv.current_digit_off_time = 0;
-						if (p_state->recv.current_digit_on_time != 0xffff)
-						{
-							p_state->recv.current_digit_on_time += n + 1;
-							if (p_state->recv.current_digit_on_time >= p_state->recv.min_digit_duration)
-							{
-								p_state->recv.current_digit_on_time = 0xffff;
-								i = (p_state->recv.digit_write_pos == CAPIDTMF_RECV_DIGIT_BUFFER_SIZE - 1) ?
-									0 : p_state->recv.digit_write_pos + 1;
-								if (i == p_state->recv.digit_read_pos)
-								{
-									trace(dprintf("%s,%d: Receive digit overrun",
-										      (char *)(FILE_), __LINE__));
-								}
-								else
-								{
-									p_state->recv.digit_buffer[p_state->recv.digit_write_pos] = result_digit;
-									p_state->recv.digit_write_pos = i;
-									p_state->recv.indication_state =
-										(p_state->recv.indication_state & ~CAPIDTMF_RECV_INDICATION_DIGIT) |
-										(~p_state->recv.indication_state_ack & CAPIDTMF_RECV_INDICATION_DIGIT);
-								}
-							}
-						}
-					}
-				}
-				cycle_counter = 0;
-				sample_number++;
-			}
-		}
-		p_state->recv.cycle_counter = cycle_counter;
-	}
-}
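A detected digit is encoded as (low-group index) | (high-group index << 2),
so bits 0-1 select the row frequency and bits 2-3 the column frequency.
Translating that code into a character is left to the caller; a minimal
sketch, assuming the conventional 4x4 DTMF keypad layout (697/770/852/941 Hz
rows against 1209/1336/1477/1633 Hz columns):

	/* Illustrative sketch only; the driver's own mapping lives outside
	 * this file and may differ. */
	static char capidtmf_digit_to_char(byte value)
	{
		static const char map[16] = {
			'1', '4', '7', '*',	/* 1209 Hz column */
			'2', '5', '8', '0',	/* 1336 Hz column */
			'3', '6', '9', '#',	/* 1477 Hz column */
			'A', 'B', 'C', 'D'	/* 1633 Hz column */
		};

		return (value < 16) ? map[value] : '?';
	}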
-
-
-void capidtmf_init(t_capidtmf_state *p_state, byte ulaw)
-{
-	p_state->ulaw = ulaw;
-	capidtmf_recv_init(p_state);
-}
-
-
-/*---------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/capidtmf.h b/drivers/isdn/hardware/eicon/capidtmf.h
deleted file mode 100644
index 0a9cf59..0000000
--- a/drivers/isdn/hardware/eicon/capidtmf.h
+++ /dev/null
@@ -1,79 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef CAPIDTMF_H_
-#define CAPIDTMF_H_
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-#define CAPIDTMF_TONE_GROUP_COUNT            2
-#define CAPIDTMF_LOW_GROUP_FREQUENCIES       4
-#define CAPIDTMF_HIGH_GROUP_FREQUENCIES      4
-#define DSPDTMF_RX_SENSITIVITY_LOW_DEFAULT	50	/* -52 dBm */
-#define DSPDTMF_RX_SENSITIVITY_HIGH_DEFAULT	50	/* -52 dBm */
-#define DSPDTMF_RX_HIGH_EXCEEDING_LOW_DEFAULT	10	/* dB */
-#define DSPDTMF_RX_LOW_EXCEEDING_HIGH_DEFAULT	10	/* dB */
-#define DSPDTMF_RX_HARMONICS_SEL_DEFAULT	12	/* dB */
-#define CAPIDTMF_RECV_BASE_FREQUENCY_COUNT   (CAPIDTMF_LOW_GROUP_FREQUENCIES + CAPIDTMF_HIGH_GROUP_FREQUENCIES)
-#define CAPIDTMF_RECV_GUARD_FREQUENCY_COUNT  8
-#define CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT  (CAPIDTMF_RECV_BASE_FREQUENCY_COUNT + CAPIDTMF_RECV_GUARD_FREQUENCY_COUNT)
-#define CAPIDTMF_RECV_POSITIVE_COEFF_COUNT   16
-#define CAPIDTMF_RECV_NEGATIVE_COEFF_COUNT   (CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - CAPIDTMF_RECV_POSITIVE_COEFF_COUNT)
-#define CAPIDTMF_RECV_ACCUMULATE_CYCLES      205
-#define CAPIDTMF_RECV_FUNDAMENTAL_OFFSET     (0xff35L * 2)
-#define CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT  (0x0028L * 2)
-#define CAPIDTMF_RECV_DIGIT_BUFFER_SIZE      32
-#define CAPIDTMF_RECV_STATE_IDLE             0x00
-#define CAPIDTMF_RECV_STATE_DTMF_ACTIVE      0x01
-typedef struct tag_capidtmf_recv_state
-{
-	byte digit_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE];
-	word digit_write_pos;
-	word digit_read_pos;
-	word indication_state;
-	word indication_state_ack;
-	long goertzel_buffer[2][CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
-	word min_gap_duration;
-	word min_digit_duration;
-	word cycle_counter;
-	word current_digit_on_time;
-	word current_digit_off_time;
-	byte current_digit_value;
-	byte state;
-} t_capidtmf_recv_state;
-typedef struct tag_capidtmf_state
-{
-	byte ulaw;
-	t_capidtmf_recv_state recv;
-} t_capidtmf_state;
-word capidtmf_recv_indication(t_capidtmf_state *p_state, byte *buffer);
-void capidtmf_recv_block(t_capidtmf_state *p_state, byte *buffer, word length);
-void capidtmf_init(t_capidtmf_state *p_state, byte ulaw);
-void capidtmf_recv_enable(t_capidtmf_state *p_state, word min_digit_duration, word min_gap_duration);
-void capidtmf_recv_disable(t_capidtmf_state *p_state);
-#define capidtmf_indication(p_state, buffer)  (((p_state)->recv.indication_state != (p_state)->recv.indication_state_ack) ? capidtmf_recv_indication(p_state, buffer) : 0)
-#define capidtmf_recv_process_block(p_state, buffer, length)  { if ((p_state)->recv.state != CAPIDTMF_RECV_STATE_IDLE) capidtmf_recv_block(p_state, buffer, length); }
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-#endif
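A minimal usage sketch of the interface declared above (not from the original
tree; byte/word come from platform.h and the returned digit codes are the raw
values produced by capidtmf_recv_block()):

	/* Illustrative sketch: run one block of companded audio through the
	 * DTMF detector and fetch any buffered digit codes. */
	static void example_dtmf_session(t_capidtmf_state *state,
					 byte *audio, word audio_len)
	{
		byte digits[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE];
		word count;

		capidtmf_init(state, 1);		/* non-zero: u-law input */
		capidtmf_recv_enable(state, 40, 40);	/* 40 ms digit / 40 ms gap */

		capidtmf_recv_process_block(state, audio, audio_len);

		count = capidtmf_indication(state, digits);
		/* digits[0..count-1] now hold raw digit codes. */

		capidtmf_recv_disable(state);
	}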
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
deleted file mode 100644
index 7a0bdbd..0000000
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ /dev/null
@@ -1,1219 +0,0 @@
-/* $Id: capifunc.c,v 1.61.4.7 2005/02/11 19:40:25 armin Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface common functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include "platform.h"
-#include "os_capi.h"
-#include "di_defs.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "divasync.h"
-#include "capifunc.h"
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-DIVA_CAPI_ADAPTER *adapter = (DIVA_CAPI_ADAPTER *) NULL;
-APPL *application = (APPL *) NULL;
-byte max_appl = MAX_APPL;
-byte max_adapter = 0;
-static CAPI_MSG *mapped_msg = (CAPI_MSG *) NULL;
-
-byte UnMapController(byte);
-char DRIVERRELEASE_CAPI[32];
-
-extern void AutomaticLaw(DIVA_CAPI_ADAPTER *);
-extern void callback(ENTITY *);
-extern word api_remove_start(void);
-extern word CapiRelease(word);
-extern word CapiRegister(word);
-extern word api_put(APPL *, CAPI_MSG *);
-
-static diva_os_spin_lock_t api_lock;
-
-static LIST_HEAD(cards);
-
-static dword notify_handle;
-static void DIRequest(ENTITY *e);
-static DESCRIPTOR MAdapter;
-static DESCRIPTOR DAdapter;
-static byte ControllerMap[MAX_DESCRIPTORS + 1];
-
-
-static void diva_register_appl(struct capi_ctr *, __u16,
-			       capi_register_params *);
-static void diva_release_appl(struct capi_ctr *, __u16);
-static char *diva_procinfo(struct capi_ctr *);
-static u16 diva_send_message(struct capi_ctr *,
-			     diva_os_message_buffer_s *);
-extern void diva_os_set_controller_struct(struct capi_ctr *);
-
-extern void DIVA_DIDD_Read(DESCRIPTOR *, int);
-
-/*
- * debug
- */
-static void no_printf(unsigned char *, ...);
-#include "debuglib.c"
-static void xlog(char *x, ...)
-{
-#ifndef DIVA_NO_DEBUGLIB
-	va_list ap;
-	if (myDriverDebugHandle.dbgMask & DL_XLOG) {
-		va_start(ap, x);
-		if (myDriverDebugHandle.dbg_irq) {
-			myDriverDebugHandle.dbg_irq(myDriverDebugHandle.id,
-						    DLI_XLOG, x, ap);
-		} else if (myDriverDebugHandle.dbg_old) {
-			myDriverDebugHandle.dbg_old(myDriverDebugHandle.id,
-						    x, ap);
-		}
-		va_end(ap);
-	}
-#endif
-}
-
-/*
- * info for proc
- */
-static char *diva_procinfo(struct capi_ctr *ctrl)
-{
-	return (ctrl->serial);
-}
-
-/*
- * stop debugging
- */
-static void stop_dbg(void)
-{
-	DbgDeregister();
-	memset(&MAdapter, 0, sizeof(MAdapter));
-	dprintf = no_printf;
-}
-
-/*
- * dummy debug function
- */
-static void no_printf(unsigned char *x, ...)
-{
-}
-
-/*
- * Controller mapping
- */
-byte MapController(byte Controller)
-{
-	byte i;
-	byte MappedController = 0;
-	byte ctrl = Controller & 0x7f;	/* mask external controller bit off */
-
-	for (i = 1; i < max_adapter + 1; i++) {
-		if (ctrl == ControllerMap[i]) {
-			MappedController = (byte) i;
-			break;
-		}
-	}
-	if (i > max_adapter) {
-		ControllerMap[0] = ctrl;
-		MappedController = 0;
-	}
-	return (MappedController | (Controller & 0x80));	/* put back external controller bit */
-}
-
-/*
- * Controller unmapping
- */
-byte UnMapController(byte MappedController)
-{
-	byte Controller;
-	byte ctrl = MappedController & 0x7f;	/* mask external controller bit off */
-
-	if (ctrl <= max_adapter) {
-		Controller = ControllerMap[ctrl];
-	} else {
-		Controller = 0;
-	}
-
-	return (Controller | (MappedController & 0x80));	/* put back external controller bit */
-}
-
-/*
- * find a new free id
- */
-static int find_free_id(void)
-{
-	int num = 0;
-	DIVA_CAPI_ADAPTER *a;
-
-	while (num < MAX_DESCRIPTORS) {
-		a = &adapter[num];
-		if (!a->Id)
-			break;
-		num++;
-	}
-	return (num + 1);
-}
-
-/*
- * find a card structure by controller number
- */
-static diva_card *find_card_by_ctrl(word controller)
-{
-	struct list_head *tmp;
-	diva_card *card;
-
-	list_for_each(tmp, &cards) {
-		card = list_entry(tmp, diva_card, list);
-		if (ControllerMap[card->Id] == controller) {
-			if (card->remove_in_progress)
-				card = NULL;
-			return (card);
-		}
-	}
-	return (diva_card *) 0;
-}
-
-/*
- * Buffer RX/TX
- */
-void *TransmitBufferSet(APPL *appl, dword ref)
-{
-	appl->xbuffer_used[ref] = true;
-	DBG_PRV1(("%d:xbuf_used(%d)", appl->Id, ref + 1))
-		return (void *)(long)ref;
-}
-
-void *TransmitBufferGet(APPL *appl, void *p)
-{
-	if (appl->xbuffer_internal[(dword)(long)p])
-		return appl->xbuffer_internal[(dword)(long)p];
-
-	return appl->xbuffer_ptr[(dword)(long)p];
-}
-
-void TransmitBufferFree(APPL *appl, void *p)
-{
-	appl->xbuffer_used[(dword)(long)p] = false;
-	DBG_PRV1(("%d:xbuf_free(%d)", appl->Id, ((dword)(long)p) + 1))
-		}
-
-void *ReceiveBufferGet(APPL *appl, int Num)
-{
-	return &appl->ReceiveBuffer[Num * appl->MaxDataLength];
-}
-
-/*
- * api_remove_start/complete for cleanup
- */
-void api_remove_complete(void)
-{
-	DBG_PRV1(("api_remove_complete"))
-		}
-
-/*
- * main function called by message.c
- */
-void sendf(APPL *appl, word command, dword Id, word Number, byte *format, ...)
-{
-	word i, j;
-	word length = 12, dlength = 0;
-	byte *write;
-	CAPI_MSG msg;
-	byte *string = NULL;
-	va_list ap;
-	diva_os_message_buffer_s *dmb;
-	diva_card *card = NULL;
-	dword tmp;
-
-	if (!appl)
-		return;
-
-	DBG_PRV1(("sendf(a=%d,cmd=%x,format=%s)",
-		  appl->Id, command, (byte *) format))
-
-		PUT_WORD(&msg.header.appl_id, appl->Id);
-	PUT_WORD(&msg.header.command, command);
-	if ((byte) (command >> 8) == 0x82)
-		Number = appl->Number++;
-	PUT_WORD(&msg.header.number, Number);
-
-	PUT_DWORD(&msg.header.controller, Id);
-	write = (byte *)&msg;
-	write += 12;
-
-	va_start(ap, format);
-	for (i = 0; format[i]; i++) {
-		switch (format[i]) {
-		case 'b':
-			tmp = va_arg(ap, dword);
-			*(byte *) write = (byte) (tmp & 0xff);
-			write += 1;
-			length += 1;
-			break;
-		case 'w':
-			tmp = va_arg(ap, dword);
-			PUT_WORD(write, (tmp & 0xffff));
-			write += 2;
-			length += 2;
-			break;
-		case 'd':
-			tmp = va_arg(ap, dword);
-			PUT_DWORD(write, tmp);
-			write += 4;
-			length += 4;
-			break;
-		case 's':
-		case 'S':
-			string = va_arg(ap, byte *);
-			length += string[0] + 1;
-			for (j = 0; j <= string[0]; j++)
-				*write++ = string[j];
-			break;
-		}
-	}
-	va_end(ap);
-
-	PUT_WORD(&msg.header.length, length);
-	msg.header.controller = UnMapController(msg.header.controller);
-
-	if (command == _DATA_B3_I)
-		dlength = GET_WORD(
-			((byte *)&msg.info.data_b3_ind.Data_Length));
-
-	if (!(dmb = diva_os_alloc_message_buffer(length + dlength,
-						 (void **) &write))) {
-		DBG_ERR(("sendf: alloc_message_buffer failed, incoming msg dropped."))
-			return;
-	}
-
-	/* copy msg header to sk_buff */
-	memcpy(write, (byte *)&msg, length);
-
-	/* if DATA_B3_IND, copy data too */
-	if (command == _DATA_B3_I) {
-		dword data = GET_DWORD(&msg.info.data_b3_ind.Data);
-		memcpy(write + length, (void *)(long)data, dlength);
-	}
-
-#ifndef DIVA_NO_DEBUGLIB
-	if (myDriverDebugHandle.dbgMask & DL_XLOG) {
-		switch (command) {
-		default:
-			xlog("\x00\x02", &msg, 0x81, length);
-			break;
-		case _DATA_B3_R | CONFIRM:
-			if (myDriverDebugHandle.dbgMask & DL_BLK)
-				xlog("\x00\x02", &msg, 0x81, length);
-			break;
-		case _DATA_B3_I:
-			if (myDriverDebugHandle.dbgMask & DL_BLK) {
-				xlog("\x00\x02", &msg, 0x81, length);
-				for (i = 0; i < dlength; i += 256) {
-					DBG_BLK((((char *)(long)GET_DWORD(&msg.info.data_b3_ind.Data)) + i,
-						 ((dlength - i) < 256) ? (dlength - i) : 256))
-						if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
-							break; /* not more if not explicitly requested */
-				}
-			}
-			break;
-		}
-	}
-#endif
-
-	/* find the card structure for this controller */
-	if (!(card = find_card_by_ctrl(write[8] & 0x7f))) {
-		DBG_ERR(("sendf - controller %d not found, incoming msg dropped",
-			 write[8] & 0x7f))
-			diva_os_free_message_buffer(dmb);
-		return;
-	}
-	/* send capi msg to capi layer */
-	capi_ctr_handle_message(&card->capi_ctrl, appl->Id, dmb);
-}
-
-/*
- * cleanup adapter
- */
-static void clean_adapter(int id, struct list_head *free_mem_q)
-{
-	DIVA_CAPI_ADAPTER *a;
-	int i, k;
-
-	a = &adapter[id];
-	k = li_total_channels - a->li_channels;
-	if (k == 0) {
-		if (li_config_table) {
-			list_add((struct list_head *)li_config_table, free_mem_q);
-			li_config_table = NULL;
-		}
-	} else {
-		if (a->li_base < k) {
-			memmove(&li_config_table[a->li_base],
-				&li_config_table[a->li_base + a->li_channels],
-				(k - a->li_base) * sizeof(LI_CONFIG));
-			for (i = 0; i < k; i++) {
-				memmove(&li_config_table[i].flag_table[a->li_base],
-					&li_config_table[i].flag_table[a->li_base + a->li_channels],
-					k - a->li_base);
-				memmove(&li_config_table[i].
-					coef_table[a->li_base],
-					&li_config_table[i].coef_table[a->li_base + a->li_channels],
-					k - a->li_base);
-			}
-		}
-	}
-	li_total_channels = k;
-	for (i = id; i < max_adapter; i++) {
-		if (adapter[i].request)
-			adapter[i].li_base -= a->li_channels;
-	}
-	if (a->plci)
-		list_add((struct list_head *)a->plci, free_mem_q);
-
-	memset(a, 0x00, sizeof(DIVA_CAPI_ADAPTER));
-	while ((max_adapter != 0) && !adapter[max_adapter - 1].request)
-		max_adapter--;
-}
-
-/*
- * remove a card, but ensure a consistent state of the LI tables
- * while the adapter is being removed
- */
-static void divacapi_remove_card(DESCRIPTOR *d)
-{
-	diva_card *card = NULL;
-	diva_os_spin_lock_magic_t old_irql;
-	LIST_HEAD(free_mem_q);
-	struct list_head *link;
-	struct list_head *tmp;
-
-	/*
-	 * Set "remove in progress flag".
-	 * Ensures that there is no call from sendf to CAPI
-	 * while the CAPI controller is about to be removed.
-	 */
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");
-	list_for_each(tmp, &cards) {
-		card = list_entry(tmp, diva_card, list);
-		if (card->d.request == d->request) {
-			card->remove_in_progress = 1;
-			list_del(tmp);
-			break;
-		}
-	}
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");
-
-	if (card) {
-		/*
-		 * Detach CAPI. Sendf cannot call to CAPI any more.
-		 * After detach, no call to send_message() is done either.
-		 */
-		detach_capi_ctr(&card->capi_ctrl);
-
-		/*
-		 * Now get API lock (to ensure stable state of LI tables)
-		 * and update the adapter map/LI table.
-		 */
-		diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");
-
-		clean_adapter(card->Id - 1, &free_mem_q);
-		DBG_TRC(("DelAdapterMap (%d) -> (%d)",
-			 ControllerMap[card->Id], card->Id))
-			ControllerMap[card->Id] = 0;
-		DBG_TRC(("adapter remove, max_adapter=%d",
-			 max_adapter));
-		diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");
-
-		/* After releasing the lock, we can free the memory */
-		diva_os_free(0, card);
-	}
-
-	/* free queued memory areas */
-	list_for_each_safe(link, tmp, &free_mem_q) {
-		list_del(link);
-		diva_os_free(0, link);
-	}
-}
-
-/*
- * remove cards
- */
-static void divacapi_remove_cards(void)
-{
-	DESCRIPTOR d;
-	struct list_head *tmp;
-	diva_card *card;
-	diva_os_spin_lock_magic_t old_irql;
-
-rescan:
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "remove cards");
-	list_for_each(tmp, &cards) {
-		card = list_entry(tmp, diva_card, list);
-		diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
-		d.request = card->d.request;
-		divacapi_remove_card(&d);
-		goto rescan;
-	}
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
-}
-
-/*
- * sync_callback
- */
-static void sync_callback(ENTITY *e)
-{
-	diva_os_spin_lock_magic_t old_irql;
-
-	DBG_TRC(("cb:Id=%x,Rc=%x,Ind=%x", e->Id, e->Rc, e->Ind))
-
-		diva_os_enter_spin_lock(&api_lock, &old_irql, "sync_callback");
-	callback(e);
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "sync_callback");
-}
-
-/*
- * add a new card
- */
-static int diva_add_card(DESCRIPTOR *d)
-{
-	int k = 0, i = 0;
-	diva_os_spin_lock_magic_t old_irql;
-	diva_card *card = NULL;
-	struct capi_ctr *ctrl = NULL;
-	DIVA_CAPI_ADAPTER *a = NULL;
-	IDI_SYNC_REQ sync_req;
-	char serial[16];
-	void *mem_to_free;
-	LI_CONFIG *new_li_config_table;
-	int j;
-
-	if (!(card = (diva_card *) diva_os_malloc(0, sizeof(diva_card)))) {
-		DBG_ERR(("diva_add_card: failed to allocate card struct."))
-			return (0);
-	}
-	memset((char *) card, 0x00, sizeof(diva_card));
-	memcpy(&card->d, d, sizeof(DESCRIPTOR));
-	sync_req.GetName.Req = 0;
-	sync_req.GetName.Rc = IDI_SYNC_REQ_GET_NAME;
-	card->d.request((ENTITY *)&sync_req);
-	strlcpy(card->name, sync_req.GetName.name, sizeof(card->name));
-	ctrl = &card->capi_ctrl;
-	strcpy(ctrl->name, card->name);
-	ctrl->register_appl = diva_register_appl;
-	ctrl->release_appl = diva_release_appl;
-	ctrl->send_message = diva_send_message;
-	ctrl->procinfo = diva_procinfo;
-	ctrl->driverdata = card;
-	diva_os_set_controller_struct(ctrl);
-
-	if (attach_capi_ctr(ctrl)) {
-		DBG_ERR(("diva_add_card: failed to attach controller."))
-			diva_os_free(0, card);
-		return (0);
-	}
-
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "find id");
-	card->Id = find_free_id();
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "find id");
-
-	strlcpy(ctrl->manu, M_COMPANY, sizeof(ctrl->manu));
-	ctrl->version.majorversion = 2;
-	ctrl->version.minorversion = 0;
-	ctrl->version.majormanuversion = DRRELMAJOR;
-	ctrl->version.minormanuversion = DRRELMINOR;
-	sync_req.GetSerial.Req = 0;
-	sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
-	sync_req.GetSerial.serial = 0;
-	card->d.request((ENTITY *)&sync_req);
-	if ((i = ((sync_req.GetSerial.serial & 0xff000000) >> 24))) {
-		sprintf(serial, "%ld-%d",
-			sync_req.GetSerial.serial & 0x00ffffff, i + 1);
-	} else {
-		sprintf(serial, "%ld", sync_req.GetSerial.serial);
-	}
-	serial[CAPI_SERIAL_LEN - 1] = 0;
-	strlcpy(ctrl->serial, serial, sizeof(ctrl->serial));
-
-	a = &adapter[card->Id - 1];
-	card->adapter = a;
-	a->os_card = card;
-	ControllerMap[card->Id] = (byte) (ctrl->cnr);
-
-	DBG_TRC(("AddAdapterMap (%d) -> (%d)", ctrl->cnr, card->Id))
-
-		sync_req.xdi_capi_prms.Req = 0;
-	sync_req.xdi_capi_prms.Rc = IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS;
-	sync_req.xdi_capi_prms.info.structure_length =
-		sizeof(diva_xdi_get_capi_parameters_t);
-	card->d.request((ENTITY *)&sync_req);
-	a->flag_dynamic_l1_down =
-		sync_req.xdi_capi_prms.info.flag_dynamic_l1_down;
-	a->group_optimization_enabled =
-		sync_req.xdi_capi_prms.info.group_optimization_enabled;
-	a->request = DIRequest;	/* card->d.request; */
-	a->max_plci = card->d.channels + 30;
-	a->max_listen = (card->d.channels > 2) ? 8 : 2;
-	if (!
-	    (a->plci =
-	     (PLCI *) diva_os_malloc(0, sizeof(PLCI) * a->max_plci))) {
-		DBG_ERR(("diva_add_card: failed alloc plci struct."))
-			memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
-		return (0);
-	}
-	memset(a->plci, 0, sizeof(PLCI) * a->max_plci);
-
-	for (k = 0; k < a->max_plci; k++) {
-		a->Id = (byte) card->Id;
-		a->plci[k].Sig.callback = sync_callback;
-		a->plci[k].Sig.XNum = 1;
-		a->plci[k].Sig.X = a->plci[k].XData;
-		a->plci[k].Sig.user[0] = (word) (card->Id - 1);
-		a->plci[k].Sig.user[1] = (word) k;
-		a->plci[k].NL.callback = sync_callback;
-		a->plci[k].NL.XNum = 1;
-		a->plci[k].NL.X = a->plci[k].XData;
-		a->plci[k].NL.user[0] = (word) ((card->Id - 1) | 0x8000);
-		a->plci[k].NL.user[1] = (word) k;
-		a->plci[k].adapter = a;
-	}
-
-	a->profile.Number = card->Id;
-	a->profile.Channels = card->d.channels;
-	if (card->d.features & DI_FAX3) {
-		a->profile.Global_Options = 0x71;
-		if (card->d.features & DI_CODEC)
-			a->profile.Global_Options |= 0x6;
-#if IMPLEMENT_DTMF
-		a->profile.Global_Options |= 0x8;
-#endif				/* IMPLEMENT_DTMF */
-		a->profile.Global_Options |= 0x80; /* Line Interconnect */
-#if IMPLEMENT_ECHO_CANCELLER
-		a->profile.Global_Options |= 0x100;
-#endif				/* IMPLEMENT_ECHO_CANCELLER */
-		a->profile.B1_Protocols = 0xdf;
-		a->profile.B2_Protocols = 0x1fdb;
-		a->profile.B3_Protocols = 0xb7;
-		a->manufacturer_features = MANUFACTURER_FEATURE_HARDDTMF;
-	} else {
-		a->profile.Global_Options = 0x71;
-		if (card->d.features & DI_CODEC)
-			a->profile.Global_Options |= 0x2;
-		a->profile.B1_Protocols = 0x43;
-		a->profile.B2_Protocols = 0x1f0f;
-		a->profile.B3_Protocols = 0x07;
-		a->manufacturer_features = 0;
-	}
-
-	a->li_pri = (a->profile.Channels > 2);
-	a->li_channels = a->li_pri ? MIXER_CHANNELS_PRI : MIXER_CHANNELS_BRI;
-	a->li_base = 0;
-	for (i = 0; &adapter[i] != a; i++) {
-		if (adapter[i].request)
-			a->li_base = adapter[i].li_base + adapter[i].li_channels;
-	}
-	k = li_total_channels + a->li_channels;
-	new_li_config_table =
-		(LI_CONFIG *) diva_os_malloc(0, ((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * k) * ((k + 3) & ~3));
-	if (new_li_config_table == NULL) {
-		DBG_ERR(("diva_add_card: failed alloc li_config table."))
-			memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
-		return (0);
-	}
-
-	/* Prevent access to line interconnect table in process update */
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "add card");
-
-	j = 0;
-	for (i = 0; i < k; i++) {
-		if ((i >= a->li_base) && (i < a->li_base + a->li_channels))
-			memset(&new_li_config_table[i], 0, sizeof(LI_CONFIG));
-		else
-			memcpy(&new_li_config_table[i], &li_config_table[j], sizeof(LI_CONFIG));
-		new_li_config_table[i].flag_table =
-			((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i) * ((k + 3) & ~3));
-		new_li_config_table[i].coef_table =
-			((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i + 1) * ((k + 3) & ~3));
-		if ((i >= a->li_base) && (i < a->li_base + a->li_channels)) {
-			new_li_config_table[i].adapter = a;
-			memset(&new_li_config_table[i].flag_table[0], 0, k);
-			memset(&new_li_config_table[i].coef_table[0], 0, k);
-		} else {
-			if (a->li_base != 0) {
-				memcpy(&new_li_config_table[i].flag_table[0],
-				       &li_config_table[j].flag_table[0],
-				       a->li_base);
-				memcpy(&new_li_config_table[i].coef_table[0],
-				       &li_config_table[j].coef_table[0],
-				       a->li_base);
-			}
-			memset(&new_li_config_table[i].flag_table[a->li_base], 0, a->li_channels);
-			memset(&new_li_config_table[i].coef_table[a->li_base], 0, a->li_channels);
-			if (a->li_base + a->li_channels < k) {
-				memcpy(&new_li_config_table[i].flag_table[a->li_base +
-									  a->li_channels],
-				       &li_config_table[j].flag_table[a->li_base],
-				       k - (a->li_base + a->li_channels));
-				memcpy(&new_li_config_table[i].coef_table[a->li_base +
-									  a->li_channels],
-				       &li_config_table[j].coef_table[a->li_base],
-				       k - (a->li_base + a->li_channels));
-			}
-			j++;
-		}
-	}
-	li_total_channels = k;
-
-	mem_to_free = li_config_table;
-
-	li_config_table = new_li_config_table;
-	for (i = card->Id; i < max_adapter; i++) {
-		if (adapter[i].request)
-			adapter[i].li_base += a->li_channels;
-	}
-
-	if (a == &adapter[max_adapter])
-		max_adapter++;
-
-	list_add(&(card->list), &cards);
-	AutomaticLaw(a);
-
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "add card");
-
-	if (mem_to_free) {
-		diva_os_free(0, mem_to_free);
-	}
-
-	i = 0;
-	while (i++ < 30) {
-		if (a->automatic_law > 3)
-			break;
-		diva_os_sleep(10);
-	}
-
-	/* profile information */
-	PUT_WORD(&ctrl->profile.nbchannel, card->d.channels);
-	ctrl->profile.goptions = a->profile.Global_Options;
-	ctrl->profile.support1 = a->profile.B1_Protocols;
-	ctrl->profile.support2 = a->profile.B2_Protocols;
-	ctrl->profile.support3 = a->profile.B3_Protocols;
-	/* manufacturer profile information */
-	ctrl->profile.manu[0] = a->man_profile.private_options;
-	ctrl->profile.manu[1] = a->man_profile.rtp_primary_payloads;
-	ctrl->profile.manu[2] = a->man_profile.rtp_additional_payloads;
-	ctrl->profile.manu[3] = 0;
-	ctrl->profile.manu[4] = 0;
-
-	capi_ctr_ready(ctrl);
-
-	DBG_TRC(("adapter added, max_adapter=%d", max_adapter));
-	return (1);
-}
-
-/*
- *  register appl
- */
-static void diva_register_appl(struct capi_ctr *ctrl, __u16 appl,
-			       capi_register_params *rp)
-{
-	APPL *this;
-	word bnum, xnum;
-	int i = 0;
-	unsigned char *p;
-	void *DataNCCI, *DataFlags, *ReceiveBuffer, *xbuffer_used;
-	void **xbuffer_ptr, **xbuffer_internal;
-	diva_os_spin_lock_magic_t old_irql;
-	unsigned int mem_len;
-	int nconn = rp->level3cnt;
-
-
-	if (diva_os_in_irq()) {
-		DBG_ERR(("CAPI_REGISTER - in irq context !"))
-			return;
-	}
-
-	DBG_TRC(("application register Id=%d", appl))
-
-		if (appl > MAX_APPL) {
-			DBG_ERR(("CAPI_REGISTER - appl.Id exceeds MAX_APPL"))
-				return;
-		}
-
-	if (nconn <= 0)
-		nconn = ctrl->profile.nbchannel * -nconn;
-
-	if (nconn == 0)
-		nconn = ctrl->profile.nbchannel;
-
-	DBG_LOG(("CAPI_REGISTER - Id = %d", appl))
-		DBG_LOG(("  MaxLogicalConnections = %d(%d)", nconn, rp->level3cnt))
-		DBG_LOG(("  MaxBDataBuffers       = %d", rp->datablkcnt))
-		DBG_LOG(("  MaxBDataLength        = %d", rp->datablklen))
-
-		if (nconn < 1 ||
-		    nconn > 255 ||
-		    rp->datablklen < 80 ||
-		    rp->datablklen > 2150 || rp->datablkcnt > 255) {
-			DBG_ERR(("CAPI_REGISTER - invalid parameters"))
-				return;
-		}
-
-	if (application[appl - 1].Id == appl) {
-		DBG_LOG(("CAPI_REGISTER - appl already registered"))
-			return;	/* appl already registered */
-	}
-
-	/* alloc memory */
-
-	bnum = nconn * rp->datablkcnt;
-	xnum = nconn * MAX_DATA_B3;
-
-	mem_len  = bnum * sizeof(word);		/* DataNCCI */
-	mem_len += bnum * sizeof(word);		/* DataFlags */
-	mem_len += bnum * rp->datablklen;	/* ReceiveBuffer */
-	mem_len += xnum;			/* xbuffer_used */
-	mem_len += xnum * sizeof(void *);	/* xbuffer_ptr */
-	mem_len += xnum * sizeof(void *);	/* xbuffer_internal */
-	mem_len += xnum * rp->datablklen;	/* xbuffer_ptr[xnum] */
-
-	DBG_LOG(("  Allocated Memory      = %d", mem_len))
-		if (!(p = diva_os_malloc(0, mem_len))) {
-			DBG_ERR(("CAPI_REGISTER - memory allocation failed"))
-				return;
-		}
-	memset(p, 0, mem_len);
-
-	DataNCCI = (void *)p;
-	p += bnum * sizeof(word);
-	DataFlags = (void *)p;
-	p += bnum * sizeof(word);
-	ReceiveBuffer = (void *)p;
-	p += bnum * rp->datablklen;
-	xbuffer_used = (void *)p;
-	p += xnum;
-	xbuffer_ptr = (void **)p;
-	p += xnum * sizeof(void *);
-	xbuffer_internal = (void **)p;
-	p += xnum * sizeof(void *);
-	for (i = 0; i < xnum; i++) {
-		xbuffer_ptr[i] = (void *)p;
-		p += rp->datablklen;
-	}
-
-	/* initialize application data */
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "register_appl");
-
-	this = &application[appl - 1];
-	memset(this, 0, sizeof(APPL));
-
-	this->Id = appl;
-
-	for (i = 0; i < max_adapter; i++) {
-		adapter[i].CIP_Mask[appl - 1] = 0;
-	}
-
-	this->queue_size = 1000;
-
-	this->MaxNCCI = (byte) nconn;
-	this->MaxNCCIData = (byte) rp->datablkcnt;
-	this->MaxBuffer = bnum;
-	this->MaxDataLength = rp->datablklen;
-
-	this->DataNCCI = DataNCCI;
-	this->DataFlags = DataFlags;
-	this->ReceiveBuffer = ReceiveBuffer;
-	this->xbuffer_used = xbuffer_used;
-	this->xbuffer_ptr = xbuffer_ptr;
-	this->xbuffer_internal = xbuffer_internal;
-	for (i = 0; i < xnum; i++) {
-		this->xbuffer_ptr[i] = xbuffer_ptr[i];
-	}
-
-	CapiRegister(this->Id);
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "register_appl");
-
-}
-
-/*
- *  release appl
- */
-static void diva_release_appl(struct capi_ctr *ctrl, __u16 appl)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	APPL *this = &application[appl - 1];
-	void *mem_to_free = NULL;
-
-	DBG_TRC(("application %d(%d) cleanup", this->Id, appl))
-
-		if (diva_os_in_irq()) {
-			DBG_ERR(("CAPI_RELEASE - in irq context !"))
-				return;
-		}
-
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "release_appl");
-	if (this->Id) {
-		CapiRelease(this->Id);
-		mem_to_free = this->DataNCCI;
-		this->DataNCCI = NULL;
-		this->Id = 0;
-	}
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "release_appl");
-
-	if (mem_to_free)
-		diva_os_free(0, mem_to_free);
-
-}
-
-/*
- *  send message
- */
-static u16 diva_send_message(struct capi_ctr *ctrl,
-			     diva_os_message_buffer_s *dmb)
-{
-	int i = 0;
-	word ret = 0;
-	diva_os_spin_lock_magic_t old_irql;
-	CAPI_MSG *msg = (CAPI_MSG *) DIVA_MESSAGE_BUFFER_DATA(dmb);
-	APPL *this = &application[GET_WORD(&msg->header.appl_id) - 1];
-	diva_card *card = ctrl->driverdata;
-	__u32 length = DIVA_MESSAGE_BUFFER_LEN(dmb);
-	word clength = GET_WORD(&msg->header.length);
-	word command = GET_WORD(&msg->header.command);
-	u16 retval = CAPI_NOERROR;
-
-	if (diva_os_in_irq()) {
-		DBG_ERR(("CAPI_SEND_MSG - in irq context !"))
-			return CAPI_REGOSRESOURCEERR;
-	}
-	DBG_PRV1(("Write - appl = %d, cmd = 0x%x", this->Id, command))
-
-		if (card->remove_in_progress) {
-			DBG_ERR(("CAPI_SEND_MSG - remove in progress!"))
-				return CAPI_REGOSRESOURCEERR;
-		}
-
-	diva_os_enter_spin_lock(&api_lock, &old_irql, "send message");
-
-	if (!this->Id) {
-		diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
-		return CAPI_ILLAPPNR;
-	}
-
-	/* patch controller number */
-	msg->header.controller = ControllerMap[card->Id]
-		| (msg->header.controller & 0x80);	/* preserve external controller bit */
-
-	switch (command) {
-	default:
-		xlog("\x00\x02", msg, 0x80, clength);
-		break;
-
-	case _DATA_B3_I | RESPONSE:
-#ifndef DIVA_NO_DEBUGLIB
-		if (myDriverDebugHandle.dbgMask & DL_BLK)
-			xlog("\x00\x02", msg, 0x80, clength);
-#endif
-		break;
-
-	case _DATA_B3_R:
-#ifndef DIVA_NO_DEBUGLIB
-		if (myDriverDebugHandle.dbgMask & DL_BLK)
-			xlog("\x00\x02", msg, 0x80, clength);
-#endif
-
-		if (clength == 24)
-			clength = 22;	/* workaround for PPcom bug */
-		/* header is always 22      */
-		if (GET_WORD(&msg->info.data_b3_req.Data_Length) >
-		    this->MaxDataLength
-		    || GET_WORD(&msg->info.data_b3_req.Data_Length) >
-		    (length - clength)) {
-			DBG_ERR(("Write - invalid message size"))
-				retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
-			goto write_end;
-		}
-
-		for (i = 0; i < (MAX_DATA_B3 * this->MaxNCCI)
-			     && this->xbuffer_used[i]; i++);
-		if (i == (MAX_DATA_B3 * this->MaxNCCI)) {
-			DBG_ERR(("Write - too many data pending"))
-				retval = CAPI_SENDQUEUEFULL;
-			goto write_end;
-		}
-		msg->info.data_b3_req.Data = i;
-
-		this->xbuffer_internal[i] = NULL;
-		memcpy(this->xbuffer_ptr[i], &((__u8 *) msg)[clength],
-		       GET_WORD(&msg->info.data_b3_req.Data_Length));
-
-#ifndef DIVA_NO_DEBUGLIB
-		if ((myDriverDebugHandle.dbgMask & DL_BLK)
-		    && (myDriverDebugHandle.dbgMask & DL_XLOG)) {
-			int j;
-			for (j = 0; j <
-				     GET_WORD(&msg->info.data_b3_req.Data_Length);
-			     j += 256) {
-				DBG_BLK((((char *) this->xbuffer_ptr[i]) + j,
-					 ((GET_WORD(&msg->info.data_b3_req.Data_Length) - j) <
-					  256) ? (GET_WORD(&msg->info.data_b3_req.Data_Length) - j) : 256))
-					if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
-						break;	/* not more if not explicitly requested */
-			}
-		}
-#endif
-		break;
-	}
-
-	memcpy(mapped_msg, msg, (__u32) clength);
-	mapped_msg->header.controller = MapController(mapped_msg->header.controller);
-	mapped_msg->header.length = clength;
-	mapped_msg->header.command = command;
-	mapped_msg->header.number = GET_WORD(&msg->header.number);
-
-	ret = api_put(this, mapped_msg);
-	switch (ret) {
-	case 0:
-		break;
-	case _BAD_MSG:
-		DBG_ERR(("Write - bad message"))
-			retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
-		break;
-	case _QUEUE_FULL:
-		DBG_ERR(("Write - queue full"))
-			retval = CAPI_SENDQUEUEFULL;
-		break;
-	default:
-		DBG_ERR(("Write - api_put returned unknown error"))
-			retval = CAPI_UNKNOWNNOTPAR;
-		break;
-	}
-
-write_end:
-	diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
-	if (retval == CAPI_NOERROR)
-		diva_os_free_message_buffer(dmb);
-	return retval;
-}
-
-
-/*
- * card's request function
- */
-static void DIRequest(ENTITY *e)
-{
-	DIVA_CAPI_ADAPTER *a = &(adapter[(byte) e->user[0]]);
-	diva_card *os_card = (diva_card *) a->os_card;
-
-	if (e->Req && (a->FlowControlIdTable[e->ReqCh] == e->Id)) {
-		a->FlowControlSkipTable[e->ReqCh] = 1;
-	}
-
-	(*(os_card->d.request)) (e);
-}
-
-/*
- * callback function from didd
- */
-static void didd_callback(void *context, DESCRIPTOR *adapter, int removal)
-{
-	if (adapter->type == IDI_DADAPTER) {
-		DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
-		return;
-	} else if (adapter->type == IDI_DIMAINT) {
-		if (removal) {
-			stop_dbg();
-		} else {
-			memcpy(&MAdapter, adapter, sizeof(MAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
-		}
-	} else if ((adapter->type > 0) && (adapter->type < 16)) {	/* IDI Adapter */
-		if (removal) {
-			divacapi_remove_card(adapter);
-		} else {
-			diva_add_card(adapter);
-		}
-	}
-	return;
-}
-
-/*
- * connect to didd
- */
-static int divacapi_connect_didd(void)
-{
-	int x = 0;
-	int dadapter = 0;
-	IDI_SYNC_REQ req;
-	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
-	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DIMAINT) {	/* MAINT found */
-			memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
-			break;
-		}
-	}
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
-			dadapter = 1;
-			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			req.didd_notify.e.Req = 0;
-			req.didd_notify.e.Rc =
-				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
-			req.didd_notify.info.callback = (void *)didd_callback;
-			req.didd_notify.info.context = NULL;
-			DAdapter.request((ENTITY *)&req);
-			if (req.didd_notify.e.Rc != 0xff) {
-				stop_dbg();
-				return (0);
-			}
-			notify_handle = req.didd_notify.info.handle;
-		}
-		else if ((DIDD_Table[x].type > 0) && (DIDD_Table[x].type < 16)) {	/* IDI Adapter found */
-			diva_add_card(&DIDD_Table[x]);
-		}
-	}
-
-	if (!dadapter) {
-		stop_dbg();
-	}
-
-	return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void divacapi_disconnect_didd(void)
-{
-	IDI_SYNC_REQ req;
-
-	stop_dbg();
-
-	req.didd_notify.e.Req = 0;
-	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
-	req.didd_notify.info.handle = notify_handle;
-	DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * we do not provide date/time here,
- * the application should do this.
- */
-int fax_head_line_time(char *buffer)
-{
-	return (0);
-}
-
-/*
- * init (alloc) main structures
- */
-static int __init init_main_structs(void)
-{
-	if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) {
-		DBG_ERR(("init: failed alloc mapped_msg."))
-			return 0;
-	}
-
-	if (!(adapter = diva_os_malloc(0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS))) {
-		DBG_ERR(("init: failed alloc adapter struct."))
-			diva_os_free(0, mapped_msg);
-		return 0;
-	}
-	memset(adapter, 0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS);
-
-	if (!(application = diva_os_malloc(0, sizeof(APPL) * MAX_APPL))) {
-		DBG_ERR(("init: failed alloc application struct."))
-			diva_os_free(0, mapped_msg);
-		diva_os_free(0, adapter);
-		return 0;
-	}
-	memset(application, 0, sizeof(APPL) * MAX_APPL);
-
-	return (1);
-}
-
-/*
- * remove (free) main structures
- */
-static void remove_main_structs(void)
-{
-	if (application)
-		diva_os_free(0, application);
-	if (adapter)
-		diva_os_free(0, adapter);
-	if (mapped_msg)
-		diva_os_free(0, mapped_msg);
-}
-
-/*
- * api_remove_start
- */
-static void do_api_remove_start(void)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	int ret = 1, count = 100;
-
-	do {
-		diva_os_enter_spin_lock(&api_lock, &old_irql, "api remove start");
-		ret = api_remove_start();
-		diva_os_leave_spin_lock(&api_lock, &old_irql, "api remove start");
-
-		diva_os_sleep(10);
-	} while (ret && count--);
-
-	if (ret)
-		DBG_ERR(("could not remove signaling ID's"))
-			}
-
-/*
- * init
- */
-int __init init_capifunc(void)
-{
-	diva_os_initialize_spin_lock(&api_lock, "capifunc");
-	memset(ControllerMap, 0, MAX_DESCRIPTORS + 1);
-	max_adapter = 0;
-
-
-	if (!init_main_structs()) {
-		DBG_ERR(("init: failed to init main structs."))
-			diva_os_destroy_spin_lock(&api_lock, "capifunc");
-		return (0);
-	}
-
-	if (!divacapi_connect_didd()) {
-		DBG_ERR(("init: failed to connect to DIDD."))
-			do_api_remove_start();
-		divacapi_remove_cards();
-		remove_main_structs();
-		diva_os_destroy_spin_lock(&api_lock, "capifunc");
-		return (0);
-	}
-
-	return (1);
-}
-
-/*
- * finit
- */
-void __exit finit_capifunc(void)
-{
-	do_api_remove_start();
-	divacapi_disconnect_didd();
-	divacapi_remove_cards();
-	remove_main_structs();
-	diva_os_destroy_spin_lock(&api_lock, "capifunc");
-}
diff --git a/drivers/isdn/hardware/eicon/capifunc.h b/drivers/isdn/hardware/eicon/capifunc.h
deleted file mode 100644
index e96c45b..0000000
--- a/drivers/isdn/hardware/eicon/capifunc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* $Id: capifunc.h,v 1.11.4.1 2004/08/28 20:03:53 armin Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface common functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef __CAPIFUNC_H__
-#define __CAPIFUNC_H__
-
-#define DRRELMAJOR  2
-#define DRRELMINOR  0
-#define DRRELEXTRA  ""
-
-#define M_COMPANY "Eicon Networks"
-
-extern char DRIVERRELEASE_CAPI[];
-
-typedef struct _diva_card {
-	struct list_head list;
-	int remove_in_progress;
-	int Id;
-	struct capi_ctr capi_ctrl;
-	DIVA_CAPI_ADAPTER *adapter;
-	DESCRIPTOR d;
-	char name[32];
-} diva_card;
-
-/*
- * prototypes
- */
-int init_capifunc(void);
-void finit_capifunc(void);
-
-#endif /* __CAPIFUNC_H__ */
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
deleted file mode 100644
index f9244dc..0000000
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* $Id: capimain.c,v 1.24 2003/09/09 06:51:05 schindler Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-
-#include "os_capi.h"
-
-#include "platform.h"
-#include "di_defs.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "cp_vers.h"
-#include "capifunc.h"
-
-static char *main_revision = "$Revision: 1.24 $";
-static char *DRIVERNAME =
-	"Eicon DIVA - CAPI Interface driver (http://www.melware.net)";
-static char *DRIVERLNAME = "divacapi";
-
-MODULE_DESCRIPTION("CAPI driver for Eicon DIVA cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("CAPI and DIVA card drivers");
-MODULE_LICENSE("GPL");
-
-/*
- * get revision number from revision string
- */
-static char *getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "1.0";
-	return rev;
-
-}
-
-/*
- * alloc a message buffer
- */
-diva_os_message_buffer_s *diva_os_alloc_message_buffer(unsigned long size,
-						       void **data_buf)
-{
-	diva_os_message_buffer_s *dmb = alloc_skb(size, GFP_ATOMIC);
-	if (dmb) {
-		*data_buf = skb_put(dmb, size);
-	}
-	return (dmb);
-}
-
-/*
- * free a message buffer
- */
-void diva_os_free_message_buffer(diva_os_message_buffer_s *dmb)
-{
-	kfree_skb(dmb);
-}
-
-/*
- * proc function for controller info
- */
-static int diva_ctl_proc_show(struct seq_file *m, void *v)
-{
-	struct capi_ctr *ctrl = m->private;
-	diva_card *card = (diva_card *) ctrl->driverdata;
-
-	seq_printf(m, "%s\n", ctrl->name);
-	seq_printf(m, "Serial No. : %s\n", ctrl->serial);
-	seq_printf(m, "Id         : %d\n", card->Id);
-	seq_printf(m, "Channels   : %d\n", card->d.channels);
-
-	return 0;
-}
-
-/*
- * set additional os settings in capi_ctr struct
- */
-void diva_os_set_controller_struct(struct capi_ctr *ctrl)
-{
-	ctrl->driver_name = DRIVERLNAME;
-	ctrl->load_firmware = NULL;
-	ctrl->reset_ctr = NULL;
-	ctrl->proc_show = diva_ctl_proc_show;
-	ctrl->owner = THIS_MODULE;
-}
-
-/*
- * module init
- */
-static int __init divacapi_init(void)
-{
-	char tmprev[32];
-	int ret = 0;
-
-	sprintf(DRIVERRELEASE_CAPI, "%d.%d%s", DRRELMAJOR, DRRELMINOR,
-		DRRELEXTRA);
-
-	printk(KERN_INFO "%s\n", DRIVERNAME);
-	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_CAPI);
-	strcpy(tmprev, main_revision);
-	printk("%s  Build: %s(%s)\n", getrev(tmprev),
-	       diva_capi_common_code_build, DIVA_BUILD);
-
-	if (!(init_capifunc())) {
-		printk(KERN_ERR "%s: failed init capi_driver.\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-	}
-
-	return ret;
-}
-
-/*
- * module exit
- */
-static void __exit divacapi_exit(void)
-{
-	finit_capifunc();
-	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divacapi_init);
-module_exit(divacapi_exit);
diff --git a/drivers/isdn/hardware/eicon/cardtype.h b/drivers/isdn/hardware/eicon/cardtype.h
deleted file mode 100644
index 8b20e22..0000000
--- a/drivers/isdn/hardware/eicon/cardtype.h
+++ /dev/null
@@ -1,1098 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _CARDTYPE_H_
-#define _CARDTYPE_H_
-#ifndef CARDTYPE_H_WANT_DATA
-#define CARDTYPE_H_WANT_DATA   0
-#endif
-#ifndef CARDTYPE_H_WANT_IDI_DATA
-#define CARDTYPE_H_WANT_IDI_DATA  0
-#endif
-#ifndef CARDTYPE_H_WANT_RESOURCE_DATA
-#define CARDTYPE_H_WANT_RESOURCE_DATA 1
-#endif
-#ifndef CARDTYPE_H_WANT_FILE_DATA
-#define CARDTYPE_H_WANT_FILE_DATA  1
-#endif
-/*
- * D-channel protocol identifiers
- *
- * Attention: Unfortunately the identifiers defined here differ from
- *      the identifiers used in Protocol/1/Common/prot/q931.h .
- *     The only reason for this is that q931.h has not a global
- *     scope and we did not know about the definitions there.
- *     But the definitions here cannot be changed easily because
- *     they are used in setup scripts and programs.
- *     Thus the definitions here have to be mapped if they are
- *     used in the protocol code context !
- *
- * Now the identifiers are defined in the q931lib/constant.h file.
- * Unfortunately this file has also not a global scope.
- * But beginning with PROTTYPE_US any new identifier will get the same
- * value as the corresponding PROT_* definition in q931lib/constant.h !
- */
-#define PROTTYPE_MINVAL     0
-#define PROTTYPE_ETSI       0
-#define PROTTYPE_1TR6       1
-#define PROTTYPE_BELG       2
-#define PROTTYPE_FRANC      3
-#define PROTTYPE_ATEL       4
-#define PROTTYPE_NI         5  /* DMS 100, Nortel, National ISDN */
-#define PROTTYPE_5ESS       6  /* 5ESS   , AT&T,   5ESS Custom   */
-#define PROTTYPE_JAPAN      7
-#define PROTTYPE_SWED       8
-#define PROTTYPE_US         9  /* US autodetect */
-#define PROTTYPE_ITALY      10
-#define PROTTYPE_TWAN       11
-#define PROTTYPE_AUSTRAL    12
-#define PROTTYPE_4ESDN      13
-#define PROTTYPE_4ESDS      14
-#define PROTTYPE_4ELDS      15
-#define PROTTYPE_4EMGC      16
-#define PROTTYPE_4EMGI      17
-#define PROTTYPE_HONGKONG   18
-#define PROTTYPE_RBSCAS     19
-#define PROTTYPE_CORNETN    20
-#define PROTTYPE_QSIG       21
-#define PROTTYPE_NI_EWSD    22 /* EWSD, Siemens, National ISDN   */
-#define PROTTYPE_5ESS_NI    23 /* 5ESS, Lucent, National ISDN    */
-#define PROTTYPE_T1CORNETN  24
-#define PROTTYPE_CORNETNQ   25
-#define PROTTYPE_T1CORNETNQ 26
-#define PROTTYPE_T1QSIG     27
-#define PROTTYPE_E1UNCH     28
-#define PROTTYPE_T1UNCH     29
-#define PROTTYPE_E1CHAN     30
-#define PROTTYPE_T1CHAN     31
-#define PROTTYPE_R2CAS      32
-#define PROTTYPE_MAXVAL     32
-/*
- * Card type identifiers
- */
-#define CARD_UNKNOWN                      0
-#define CARD_NONE                         0
-/* DIVA cards */
-#define CARDTYPE_DIVA_MCA                 0
-#define CARDTYPE_DIVA_ISA                 1
-#define CARDTYPE_DIVA_PCM                 2
-#define CARDTYPE_DIVAPRO_ISA              3
-#define CARDTYPE_DIVAPRO_PCM              4
-#define CARDTYPE_DIVAPICO_ISA             5
-#define CARDTYPE_DIVAPICO_PCM             6
-/* DIVA 2.0 cards */
-#define CARDTYPE_DIVAPRO20_PCI            7
-#define CARDTYPE_DIVA20_PCI               8
-/* S cards */
-#define CARDTYPE_QUADRO_ISA               9
-#define CARDTYPE_S_ISA                    10
-#define CARDTYPE_S_MCA                    11
-#define CARDTYPE_SX_ISA                   12
-#define CARDTYPE_SX_MCA                   13
-#define CARDTYPE_SXN_ISA                  14
-#define CARDTYPE_SXN_MCA                  15
-#define CARDTYPE_SCOM_ISA                 16
-#define CARDTYPE_SCOM_MCA                 17
-#define CARDTYPE_PR_ISA                   18
-#define CARDTYPE_PR_MCA                   19
-/* Diva Server cards (formerly called Maestra, later Amadeo) */
-#define CARDTYPE_MAESTRA_ISA              20
-#define CARDTYPE_MAESTRA_PCI              21
-/* Diva Server cards to be developed (Quadro, Primary rate) */
-#define CARDTYPE_DIVASRV_Q_8M_PCI         22
-#define CARDTYPE_DIVASRV_P_30M_PCI        23
-#define CARDTYPE_DIVASRV_P_2M_PCI         24
-#define CARDTYPE_DIVASRV_P_9M_PCI         25
-/* DIVA 2.0 cards */
-#define CARDTYPE_DIVA20_ISA               26
-#define CARDTYPE_DIVA20U_ISA              27
-#define CARDTYPE_DIVA20U_PCI              28
-#define CARDTYPE_DIVAPRO20_ISA            29
-#define CARDTYPE_DIVAPRO20U_ISA           30
-#define CARDTYPE_DIVAPRO20U_PCI           31
-/* DIVA combi cards (piccola ISDN + rockwell V.34 modem) */
-#define CARDTYPE_DIVAMOBILE_PCM           32
-#define CARDTYPE_TDKGLOBALPRO_PCM         33
-/* DIVA Pro PC OEM card for 'New Media Corporation' */
-#define CARDTYPE_NMC_DIVAPRO_PCM          34
-/* DIVA Pro 2.0 OEM cards for 'British Telecom' */
-#define CARDTYPE_BT_EXLANE_PCI            35
-#define CARDTYPE_BT_EXLANE_ISA            36
-/* DIVA low cost cards, 1st name DIVA 3.0, 2nd DIVA 2.01, 3rd ??? */
-#define CARDTYPE_DIVALOW_ISA              37
-#define CARDTYPE_DIVALOWU_ISA             38
-#define CARDTYPE_DIVALOW_PCI              39
-#define CARDTYPE_DIVALOWU_PCI             40
-/* DIVA combi cards (piccola ISDN + rockwell V.90 modem) */
-#define CARDTYPE_DIVAMOBILE_V90_PCM       41
-#define CARDTYPE_TDKGLOBPRO_V90_PCM       42
-#define CARDTYPE_DIVASRV_P_23M_PCI        43
-#define CARDTYPE_DIVALOW_USB              44
-/* DIVA Audio (CT) family */
-#define CARDTYPE_DIVA_CT_ST               45
-#define CARDTYPE_DIVA_CT_U                46
-#define CARDTYPE_DIVA_CTLITE_ST           47
-#define CARDTYPE_DIVA_CTLITE_U            48
-/* DIVA ISDN plus V.90 series */
-#define CARDTYPE_DIVAISDN_V90_PCM         49
-#define CARDTYPE_DIVAISDN_V90_PCI         50
-#define CARDTYPE_DIVAISDN_TA              51
-/* DIVA Server Voice cards */
-#define CARDTYPE_DIVASRV_VOICE_Q_8M_PCI   52
-/* DIVA Server V2 cards */
-#define CARDTYPE_DIVASRV_Q_8M_V2_PCI      53
-#define CARDTYPE_DIVASRV_P_30M_V2_PCI     54
-/* DIVA Server Voice V2 cards */
-#define CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI 55
-#define CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI 56
-/* Diva LAN */
-#define CARDTYPE_DIVAISDN_LAN             57
-#define CARDTYPE_DIVA_202_PCI_ST          58
-#define CARDTYPE_DIVA_202_PCI_U           59
-#define CARDTYPE_DIVASRV_B_2M_V2_PCI      60
-#define CARDTYPE_DIVASRV_B_2F_PCI         61
-#define CARDTYPE_DIVALOW_USBV2            62
-#define CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI 63
-#define CARDTYPE_DIVA_PRO_30_PCI_ST       64
-#define CARDTYPE_DIVA_CT_ST_V20           65
-/* Diva Mobile V.90 PC Card and Diva ISDN PC Card */
-#define CARDTYPE_DIVAMOBILE_V2_PCM        66
-#define CARDTYPE_DIVA_V2_PCM              67
-/* Re-badged Diva Pro PC Card */
-#define CARDTYPE_DIVA_PC_CARD             68
-/* next free card type identifier */
-#define CARDTYPE_MAX                      69
-/*
- * The card families
- */
-#define FAMILY_DIVA   1
-#define FAMILY_S   2
-#define FAMILY_MAESTRA  3
-#define FAMILY_MAX   4
-/*
- * The basic card types
- */
-#define CARD_DIVA           1  /* DSP based, old DSP */
-#define CARD_PRO            2  /* DSP based, new DSP */
-#define CARD_PICO           3  /* HSCX based   */
-#define CARD_S    4  /* IDI on board based */
-#define CARD_SX    5  /* IDI on board based */
-#define CARD_SXN   6  /* IDI on board based */
-#define CARD_SCOM   7  /* IDI on board based */
-#define CARD_QUAD   8  /* IDI on board based */
-#define CARD_PR    9  /* IDI on board based */
-#define CARD_MAE         10  /* IDI on board based */
-#define CARD_MAEQ        11  /* IDI on board based */
-#define CARD_MAEP        12  /* IDI on board based */
-#define CARD_DIVALOW  13  /* IPAC based   */
-#define CARD_CT    14  /* SCOUT based          */
-#define CARD_DIVATA   15  /* DIVA TA */
-#define CARD_DIVALAN  16  /* DIVA LAN */
-#define CARD_MAE2         17  /* IDI on board based */
-#define CARD_MAX   18
-/*
- * The internal card types of the S family
- */
-#define CARD_I_NONE   0
-#define CARD_I_S   0
-#define CARD_I_SX   1
-#define CARD_I_SCOM   2
-#define CARD_I_QUAD   3
-#define CARD_I_PR   4
-/*
- * The bus types we support
- */
-#define BUS_ISA             1
-#define BUS_PCM             2
-#define BUS_PCI             3
-#define BUS_MCA             4
-#define BUS_USB             5
-#define BUS_COM    6
-#define BUS_LAN    7
-/*
- * The chips we use for B-channel traffic
- */
-#define CHIP_NONE           0
-#define CHIP_DSP            1
-#define CHIP_HSCX           2
-#define CHIP_IPAC           3
-#define CHIP_SCOUT          4
-#define CHIP_EXTERN         5
-#define CHIP_IPACX          6
-/*
- * The structures where the card properties are aggregated by id
- */
-typedef struct CARD_PROPERTIES
-{   char     *Name;  /* official marketing name     */
-	unsigned short PnPId;  /* plug and play ID (for non PCMIA cards) */
-	unsigned short Version; /* major and minor version no of the card */
-	unsigned char DescType; /* card type to set in the IDI descriptor */
-	unsigned char  Family;  /* basic family of the card     */
-	unsigned short  Features; /* features bits to set in the IDI desc. */
-	unsigned char Card;  /* basic card type       */
-	unsigned char IType;  /* internal type of S cards (read from ram) */
-	unsigned char  Bus;  /* bus type this card is designed for  */
-	unsigned char  Chip;  /* chipset used on card      */
-	unsigned char Adapters; /* number of adapters on card    */
-	unsigned char Channels; /* # of channels per adapter    */
-	unsigned short E_info;  /* # of ram entity info structs per adapter */
-	unsigned short SizeIo;  /* size of IO window per adapter   */
-	unsigned short SizeMem; /* size of memory window per adapter  */
-} CARD_PROPERTIES;
-typedef struct CARD_RESOURCE
-{ unsigned char Int[10];
-	unsigned short IoFirst;
-	unsigned short IoStep;
-	unsigned short IoCnt;
-	unsigned long MemFirst;
-	unsigned long MemStep;
-	unsigned short MemCnt;
-} CARD_RESOURCE;
-/* test if the card of type 't' is a plug & play card */
-#define IS_PNP(t)						\
-	(							\
-		(						\
-			CardProperties[t].Bus != BUS_ISA	\
-			&&					\
-			CardProperties[t].Bus != BUS_MCA	\
-			)					\
-		||						\
-		(						\
-			CardProperties[t].Family != FAMILY_S	\
-			&&					\
-			CardProperties[t].Card != CARD_DIVA	\
-			)					\
-		)
-/* extract IDI Descriptor info for card type 't' (p == DescType/Features) */
-#define IDI_PROP(t, p) (CardProperties[t].p)
-#if CARDTYPE_H_WANT_DATA
-#if CARDTYPE_H_WANT_IDI_DATA
-/* include "di_defs.h" for IDI adapter type and feature flag definitions */
-#include "di_defs.h"
-#else /*!CARDTYPE_H_WANT_IDI_DATA*/
-/* define IDI adapter types and feature flags here to prevent inclusion  */
-#ifndef IDI_ADAPTER_S
-#define IDI_ADAPTER_S           1
-#define IDI_ADAPTER_PR          2
-#define IDI_ADAPTER_DIVA        3
-#define IDI_ADAPTER_MAESTRA     4
-#endif
-#ifndef DI_VOICE
-#define DI_VOICE          0x0 /* obsolete define */
-#define DI_FAX3           0x1
-#define DI_MODEM          0x2
-#define DI_POST           0x4
-#define DI_V110           0x8
-#define DI_V120           0x10
-#define DI_POTS           0x20
-#define DI_CODEC          0x40
-#define DI_MANAGE         0x80
-#define DI_V_42           0x0100
-#define DI_EXTD_FAX       0x0200 /* Extended FAX (ECM, 2D, T.6, Polling) */
-#define DI_AT_PARSER      0x0400 /* Build-in AT Parser in the L2 */
-#define DI_VOICE_OVER_IP  0x0800 /* Voice over IP support */
-#endif
-#endif /*CARDTYPE_H_WANT_IDI_DATA*/
-#define DI_V1x0         (DI_V110 | DI_V120)
-#define DI_NULL         0x0000
-#if defined(SOFT_DSP_SUPPORT)
-#define SOFT_DSP_ADD_FEATURES  (DI_MODEM | DI_FAX3 | DI_AT_PARSER)
-#else
-#define SOFT_DSP_ADD_FEATURES  0
-#endif
-#if defined(SOFT_V110_SUPPORT)
-#define DI_SOFT_V110  DI_V110
-#else
-#define DI_SOFT_V110  0
-#endif
-/*--- CardProperties [Index=CARDTYPE_....] ---------------------------------*/
-CARD_PROPERTIES CardProperties[] =
-{
-	{ /*  0  */
-		"Diva MCA",       0x6336,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
-		CARD_DIVA,   CARD_I_NONE, BUS_MCA, CHIP_DSP,
-		1, 2,  0,   8,      0
-	},
-	{ /*  1  */
-		"Diva ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
-		CARD_DIVA,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,  0,   8,      0
-	},
-	{ /*  2  */
-		"Diva/PCM",       0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
-		CARD_DIVA,   CARD_I_NONE, BUS_PCM, CHIP_DSP,
-		1, 2,  0,   8,      0
-	},
-	{ /*  3  */
-		"Diva PRO ISA",      0x0031,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_PRO,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,  0,   8,      0
-	},
-	{ /*  4  */
-		"Diva PRO PC-Card",     0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_PRO,   CARD_I_NONE, BUS_PCM, CHIP_DSP,
-		1, 2,   0,   8,      0
-	},
-	{ /*  5  */
-		"Diva PICCOLA ISA",     0x0051,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_ISA, CHIP_HSCX,
-		1, 2,   0,   8,      0
-	},
-	{ /*  6  */
-		"Diva PICCOLA PCM",     0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_HSCX,
-		1, 2,   0,   8,      0
-	},
-	{ /*  7  */
-		"Diva PRO 2.0 S/T PCI",    0xe001,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   0,   8,      0
-	},
-	{ /*  8  */
-		"Diva 2.0 S/T PCI",     0xe002,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCI, CHIP_HSCX,
-		1, 2,   0,   8,      0
-	},
-	{ /*  9  */
-		"QUADRO ISA",      0x0000,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_NULL,
-		CARD_QUAD,   CARD_I_QUAD, BUS_ISA, CHIP_NONE,
-		4, 2,   16,  0,  0x800
-	},
-	{ /* 10  */
-		"S ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_CODEC,
-		CARD_S,    CARD_I_S,  BUS_ISA, CHIP_NONE,
-		1, 1,   16,  0,  0x800
-	},
-	{ /* 11  */
-		"S MCA",       0x6a93,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_CODEC,
-		CARD_S,    CARD_I_S,  BUS_MCA, CHIP_NONE,
-		1, 1,   16,  16,  0x400
-	},
-	{ /* 12 */
-		"SX ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_NULL,
-		CARD_SX,   CARD_I_SX,  BUS_ISA, CHIP_NONE,
-		1, 2,  16,  0,  0x800
-	},
-	{ /* 13 */
-		"SX MCA",       0x6a93,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_NULL,
-		CARD_SX,   CARD_I_SX,  BUS_MCA, CHIP_NONE,
-		1, 2,  16,  16,  0x400
-	},
-	{ /* 14 */
-		"SXN ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_NULL,
-		CARD_SXN,   CARD_I_SCOM, BUS_ISA, CHIP_NONE,
-		1, 2,   16,  0,   0x800
-	},
-	{ /* 15 */
-		"SXN MCA",       0x6a93,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_NULL,
-		CARD_SXN,   CARD_I_SCOM, BUS_MCA, CHIP_NONE,
-		1, 2,  16,  16,  0x400
-	},
-	{ /* 16 */
-		"SCOM ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_CODEC,
-		CARD_SCOM,   CARD_I_SCOM, BUS_ISA, CHIP_NONE,
-		1, 2,   16,  0,   0x800
-	},
-	{ /* 17 */
-		"SCOM MCA",       0x6a93,  0x0100,
-		IDI_ADAPTER_S,  FAMILY_S,  DI_CODEC,
-		CARD_SCOM,   CARD_I_SCOM, BUS_MCA, CHIP_NONE,
-		1, 2,  16,  16,  0x400
-	},
-	{ /* 18 */
-		"S2M ISA",       0x0000,  0x0100,
-		IDI_ADAPTER_PR,  FAMILY_S,  DI_NULL,
-		CARD_PR,   CARD_I_PR,  BUS_ISA, CHIP_NONE,
-		1, 30,  256, 0,   0x4000
-	},
-	{ /* 19 */
-		"S2M MCA",       0x6abb,  0x0100,
-		IDI_ADAPTER_PR,  FAMILY_S,  DI_NULL,
-		CARD_PR,   CARD_I_PR,  BUS_MCA, CHIP_NONE,
-		1, 30,  256, 16,  0x4000
-	},
-	{ /* 20 */
-		"Diva Server BRI-2M ISA",   0x0041,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAE,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,   16,  8,  0
-	},
-	{ /* 21 */
-		"Diva Server BRI-2M PCI",   0xE010,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAE,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   16,  8,   0
-	},
-	{ /* 22 */
-		"Diva Server 4BRI-8M PCI",   0xE012,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEQ,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		4, 2,   16,  8,   0
-	},
-	{ /* 23 */
-		"Diva Server PRI-30M PCI",   0xE014,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,  256,  8,   0
-	},
-	{ /* 24 */
-		"Diva Server PRI-2M PCI",   0xe014,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,  256,  8,   0
-	},
-	{ /* 25 */
-		"Diva Server PRI-9M PCI",   0x0000,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,     256,  8,   0
-	},
-	{ /* 26 */
-		"Diva 2.0 S/T ISA",     0x0071,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_ISA, CHIP_HSCX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 27 */
-		"Diva 2.0 U ISA",     0x0091,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_ISA, CHIP_HSCX,
-		1, 2,   0,   8,   0
-	},
-	{ /* 28 */
-		"Diva 2.0 U PCI",     0xe004,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCI, CHIP_HSCX,
-		1, 2,   0,   8,   0
-	},
-	{ /* 29 */
-		"Diva PRO 2.0 S/T ISA",    0x0061,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,  0,   8,   0
-	},
-	{ /* 30 */
-		"Diva PRO 2.0 U ISA",    0x0081,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,  0,   8,   0
-	},
-	{ /* 31 */
-		"Diva PRO 2.0 U PCI",    0xe003,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   0,   8,   0
-	},
-	{ /* 32 */
-		"Diva MOBILE",      0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_HSCX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 33 */
-		"TDK DFI3600",      0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_HSCX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 34 (OEM version of 4 - "Diva PRO PC-Card") */
-		"New Media ISDN",     0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_PRO,   CARD_I_NONE, BUS_PCM, CHIP_DSP,
-		1, 2,   0,   8,   0
-	},
-	{ /* 35 (OEM version of 7 - "Diva PRO 2.0 S/T PCI") */
-		"BT ExLane PCI",     0xe101,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   0,   8,   0
-	},
-	{ /* 36 (OEM version of 29 - "Diva PRO 2.0 S/T ISA") */
-		"BT ExLane ISA",     0x1061,  0x0200,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
-		CARD_PRO,   CARD_I_NONE, BUS_ISA, CHIP_DSP,
-		1, 2,   0,   8,   0
-	},
-	{ /* 37 */
-		"Diva 2.01 S/T ISA",    0x00A1,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_ISA, CHIP_IPAC,
-		1, 2,   0,   8,      0
-	},
-	{ /* 38 */
-		"Diva 2.01 U ISA",     0x00B1,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_ISA, CHIP_IPAC,
-		1, 2,   0,   8,      0
-	},
-	{ /* 39 */
-		"Diva 2.01 S/T PCI",    0xe005,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_PCI, CHIP_IPAC,
-		1, 2,   0,   8,   0
-	},
-	{ /* 40        no ID yet */
-		"Diva 2.01 U PCI",     0x0000,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_PCI, CHIP_IPAC,
-		1, 2,   0,   8,   0
-	},
-	{ /* 41 */
-		"Diva MOBILE V.90",     0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_HSCX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 42 */
-		"TDK DFI3600 V.90",     0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_HSCX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 43 */
-		"Diva Server PRI-23M PCI",   0xe014,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,  256,  8,   0
-	},
-	{ /* 44 */
-		"Diva 2.01 S/T USB",    0x1000,     0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_USB, CHIP_IPAC,
-		1,  2,  0,  8,   0
-	},
-	{ /* 45 */
-		"Diva CT S/T PCI",    0xe006,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_CT,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 46 */
-		"Diva CT U PCI",     0xe007,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_CT,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 47 */
-		"Diva CT Lite S/T PCI",   0xe008,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_CT,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 48 */
-		"Diva CT Lite U PCI",   0xe009,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_CT,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 49 */
-		"Diva ISDN+V.90 PC Card", 0x8D8C, 0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_DIVALOW, CARD_I_NONE, BUS_PCM, CHIP_IPAC,
-		1, 2,  0,   8,   0
-	},
-	{ /* 50 */
-		"Diva ISDN+V.90 PCI",    0xe00A,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120  | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_PCI, CHIP_IPAC,
-		1, 2,   0,   8,   0
-	},
-	{ /* 51 (DivaTA)      no ID */
-		"Diva TA",       0x0000,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V110 | DI_FAX3 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVATA,  CARD_I_NONE, BUS_COM, CHIP_EXTERN,
-		1, 1,   0,   8,   0
-	},
-	{ /* 52 (Diva Server 4BRI-8M PCI adapter enabled for Voice) */
-		"Diva Server Voice 4BRI-8M PCI", 0xE016,  0x0100,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
-		CARD_MAEQ,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		4, 2,   16,  8,   0
-	},
-	{ /* 53 (Diva Server 4BRI 2.0 adapter) */
-		"Diva Server 4BRI-8M 2.0 PCI",  0xE013,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEQ,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		4, 2,   16,  8,   0
-	},
-	{ /* 54 (Diva Server PRI 2.0 adapter) */
-		"Diva Server PRI 2.0 PCI",   0xE015,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,  256,  8,   0
-	},
-	{ /* 55 (Diva Server 4BRI-8M 2.0 PCI adapter enabled for Voice) */
-		"Diva Server Voice 4BRI-8M 2.0 PCI", 0xE017,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
-		CARD_MAEQ,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		4, 2,   16,  8,   0
-	},
-	{ /* 56 (Diva Server PRI 2.0 PCI adapter enabled for Voice) */
-		"Diva Server Voice PRI 2.0 PCI",  0xE019,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
-		CARD_MAEP,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 30,  256,  8,   0
-	},
-	{
-		/* 57 (DivaLan )      no ID */
-		"Diva LAN",       0x0000,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V110 | DI_FAX3 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALAN,  CARD_I_NONE, BUS_LAN, CHIP_EXTERN,
-		1, 1,   0,   8,   0
-	},
-	{ /* 58 */
-		"Diva 2.02 PCI S/T",    0xE00B,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES | DI_SOFT_V110,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_PCI, CHIP_IPACX,
-		1, 2,   0,   8,   0
-	},
-	{ /* 59 */
-		"Diva 2.02 PCI U",     0xE00C,  0x0300,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_PCI, CHIP_IPACX,
-		1, 2,   0,   8,   0
-	},
-	{ /* 60 */
-		"Diva Server BRI-2M 2.0 PCI",     0xE018,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_MAE2,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   16,  8,   0
-	},
-	{ /* 61  (the previous name was Diva Server BRI-2F 2.0 PCI) */
-		"Diva Server 2FX",                      0xE01A,     0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_SOFT_V110,
-		CARD_MAE2,          CARD_I_NONE,    BUS_PCI,    CHIP_IPACX,
-		1,  2,      16,     8,   0
-	},
-	{ /* 62 */
-		" Diva ISDN USB 2.0",    0x1003,     0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_DIVALOW,  CARD_I_NONE, BUS_USB, CHIP_IPACX,
-		1, 2,  0,  8,   0
-	},
-	{ /* 63 (Diva Server BRI-2M 2.0 PCI adapter enabled for Voice) */
-		"Diva Server Voice BRI-2M 2.0 PCI", 0xE01B,  0x0200,
-		IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
-		CARD_MAE2,   CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1, 2,   16,  8,   0
-	},
-	{ /* 64 */
-		"Diva Pro 3.0 PCI",    0xe00d,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
-		CARD_PRO,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 65 */
-		"Diva ISDN + CT 2.0",    0xE00E,  0x0300,
-		IDI_ADAPTER_DIVA   , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
-		CARD_CT,       CARD_I_NONE, BUS_PCI, CHIP_DSP,
-		1,  2,  0,  0,   0
-	},
-	{ /* 66 */
-		"Diva Mobile V.90 PC Card",  0x8331,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_IPACX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 67 */
-		"Diva ISDN PC Card",  0x8311,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PICO,   CARD_I_NONE, BUS_PCM, CHIP_IPACX,
-		1, 2,  0,   8,   0
-	},
-	{ /* 68 */
-		"Diva ISDN PC Card",  0x0000,  0x0100,
-		IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
-		CARD_PRO,   CARD_I_NONE, BUS_PCM, CHIP_DSP,
-		1, 2,   0,   8,      0
-	},
-};
-#if CARDTYPE_H_WANT_RESOURCE_DATA
-/*--- CardResource [Index=CARDTYPE_....]   ---------------------------(GEI)-*/
-CARD_RESOURCE CardResource[] = {
-/*   Interrupts     IO-Address   Mem-Address */
-	/* 0*/ {  3,4,9,0,0,0,0,0,0,0,   0x200,0x20,16,   0x0,0x0,0   }, // DIVA MCA
-	/* 1*/ {  3,4,9,10,11,12,0,0,0,0,  0x200,0x20,16,   0x0,0x0,0   }, // DIVA ISA
-	/* 2*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA PCMCIA
-	/* 3*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,   0x0,0x0,0   }, // DIVA PRO ISA
-	/* 4*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA PRO PCMCIA
-	/* 5*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // DIVA PICCOLA ISA
-	/* 6*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA PICCOLA PCMCIA
-	/* 7*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA PRO 2.0 PCI
-	/* 8*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.0 PCI
-	/* 9*/ {  3,4,5,7,9,10,11,12,0,0,  0x0,0x0,0,   0x80000,0x2000,64 }, // QUADRO ISA
-	/*10*/ {  3,4,9,10,11,12,0,0,0,0,  0x0,0x0,0,   0xc0000,0x2000,16 }, // S ISA
-	/*11*/ {  3,4,9,0,0,0,0,0,0,0,  0xc00,0x10,16,  0xc0000,0x2000,16 }, // S MCA
-	/*12*/ {  3,4,9,10,11,12,0,0,0,0,  0x0,0x0,0,   0xc0000,0x2000,16 }, // SX ISA
-	/*13*/ {  3,4,9,0,0,0,0,0,0,0,  0xc00,0x10,16,  0xc0000,0x2000,16 }, // SX MCA
-	/*14*/ {  3,4,5,7,9,10,11,12,0,0,  0x0,0x0,0,   0x80000,0x0800,256 }, // SXN ISA
-	/*15*/ {  3,4,9,0,0,0,0,0,0,0,  0xc00,0x10,16,  0xc0000,0x2000,16 }, // SXN MCA
-	/*16*/ {  3,4,5,7,9,10,11,12,0,0,  0x0,0x0,0,   0x80000,0x0800,256 }, // SCOM ISA
-	/*17*/ {  3,4,9,0,0,0,0,0,0,0,  0xc00,0x10,16,  0xc0000,0x2000,16 }, // SCOM MCA
-	/*18*/ {  3,4,5,7,9,10,11,12,0,0,  0x0,0x0,0,   0xc0000,0x4000,16 }, // S2M ISA
-	/*19*/ {  3,4,9,0,0,0,0,0,0,0,  0xc00,0x10,16,  0xc0000,0x4000,16 }, // S2M MCA
-	/*20*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // MAESTRA ISA
-	/*21*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // MAESTRA PCI
-	/*22*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // MAESTRA QUADRO ISA
-	/*23*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048,  0x0,0x0,0   }, // MAESTRA QUADRO PCI
-	/*24*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // MAESTRA PRIMARY ISA
-	/*25*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // MAESTRA PRIMARY PCI
-	/*26*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // DIVA 2.0 ISA
-	/*27*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // DIVA 2.0 /U ISA
-	/*28*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.0 /U PCI
-	/*29*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,   0x0,0x0,0   }, // DIVA PRO 2.0 ISA
-	/*30*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,   0x0,0x0,0   }, // DIVA PRO 2.0 /U ISA
-	/*31*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA PRO 2.0 /U PCI
-	/*32*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA MOBILE
-	/*33*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // TDK DFI3600 (same as DIVA MOBILE [32])
-	/*34*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // New Media ISDN (same as DIVA PRO PCMCIA [4])
-	/*35*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // BT ExLane PCI (same as DIVA PRO 2.0 PCI [7])
-	/*36*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,   0x0,0x0,0   }, // BT ExLane ISA (same as DIVA PRO 2.0 ISA [29])
-	/*37*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // DIVA 2.01 S/T ISA
-	/*38*/ {  3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16,  0x0,0x0,0   }, // DIVA 2.01 U ISA
-	/*39*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.01 S/T PCI
-	/*40*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.01 U PCI
-	/*41*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA MOBILE V.90
-	/*42*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // TDK DFI3600 V.90 (same as DIVA MOBILE V.90 [39])
-	/*43*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048,  0x0,0x0,0   }, // DIVA Server PRI-23M PCI
-	/*44*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA 2.01 S/T USB
-	/*45*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA CT S/T PCI
-	/*46*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA CT U PCI
-	/*47*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA CT Lite S/T PCI
-	/*48*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA CT Lite U PCI
-	/*49*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA ISDN+V.90 PC Card
-	/*50*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA ISDN+V.90 PCI
-	/*51*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA TA
-	/*52*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048,  0x0,0x0,0   }, // MAESTRA VOICE QUADRO PCI
-	/*53*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048,  0x0,0x0,0   }, // MAESTRA VOICE QUADRO PCI
-	/*54*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // MAESTRA VOICE PRIMARY PCI
-	/*55*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048,  0x0,0x0,0   }, // MAESTRA VOICE QUADRO PCI
-	/*56*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // MAESTRA VOICE PRIMARY PCI
-	/*57*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA LAN
-	/*58*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.02 S/T PCI
-	/*59*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 2.02 U PCI
-	/*60*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // Diva Server BRI-2M 2.0 PCI
-	/*61*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // Diva Server BRI-2F PCI
-	/*62*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA 2.01 S/T USB
-	/*63*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // Diva Server Voice BRI-2M 2.0 PCI
-	/*64*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA 3.0 PCI
-	/*65*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA CT S/T PCI V2.0
-	/*66*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA Mobile V.90 PC Card
-	/*67*/ {  0,0,0,0,0,0,0,0,0,0,  0x0,0x0,0,   0x0,0x0,0   }, // DIVA ISDN PC Card
-	/*68*/ {  3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192,  0x0,0x0,0   }, // DIVA ISDN PC Card
-};
-#endif /*CARDTYPE_H_WANT_RESOURCE_DATA*/
-#else /*!CARDTYPE_H_WANT_DATA*/
-extern CARD_PROPERTIES  CardProperties[];
-extern CARD_RESOURCE  CardResource[];
-#endif /*CARDTYPE_H_WANT_DATA*/
-/*
- * all existing download files
- */
-#define CARD_DSP_CNT  5
-#define CARD_PROT_CNT  9
-#define CARD_FT_UNKNOWN     0
-#define CARD_FT_B   1
-#define CARD_FT_D   2
-#define CARD_FT_S   3
-#define CARD_FT_M   4
-#define CARD_FT_NEW_DSP_COMBIFILE 5  /* File format of new DSP code (the DSP code powered by Telindus) */
-#define CARD_FILE_NONE      0
-#define CARD_B_S   1
-#define CARD_B_P   2
-#define CARD_D_K1   3
-#define CARD_D_K2   4
-#define CARD_D_H   5
-#define CARD_D_V   6
-#define CARD_D_M   7
-#define CARD_D_F   8
-#define CARD_P_S_E   9
-#define CARD_P_S_1   10
-#define CARD_P_S_B   11
-#define CARD_P_S_F   12
-#define CARD_P_S_A   13
-#define CARD_P_S_N   14
-#define CARD_P_S_5   15
-#define CARD_P_S_J   16
-#define CARD_P_SX_E   17
-#define CARD_P_SX_1   18
-#define CARD_P_SX_B   19
-#define CARD_P_SX_F   20
-#define CARD_P_SX_A   21
-#define CARD_P_SX_N   22
-#define CARD_P_SX_5   23
-#define CARD_P_SX_J   24
-#define CARD_P_SY_E   25
-#define CARD_P_SY_1   26
-#define CARD_P_SY_B   27
-#define CARD_P_SY_F   28
-#define CARD_P_SY_A   29
-#define CARD_P_SY_N   30
-#define CARD_P_SY_5   31
-#define CARD_P_SY_J   32
-#define CARD_P_SQ_E   33
-#define CARD_P_SQ_1   34
-#define CARD_P_SQ_B   35
-#define CARD_P_SQ_F   36
-#define CARD_P_SQ_A   37
-#define CARD_P_SQ_N   38
-#define CARD_P_SQ_5   39
-#define CARD_P_SQ_J   40
-#define CARD_P_P_E   41
-#define CARD_P_P_1   42
-#define CARD_P_P_B   43
-#define CARD_P_P_F   44
-#define CARD_P_P_A   45
-#define CARD_P_P_N   46
-#define CARD_P_P_5   47
-#define CARD_P_P_J   48
-#define CARD_P_M_E   49
-#define CARD_P_M_1   50
-#define CARD_P_M_B   51
-#define CARD_P_M_F   52
-#define CARD_P_M_A   53
-#define CARD_P_M_N   54
-#define CARD_P_M_5   55
-#define CARD_P_M_J   56
-#define CARD_P_S_S   57
-#define CARD_P_SX_S   58
-#define CARD_P_SY_S   59
-#define CARD_P_SQ_S   60
-#define CARD_P_P_S   61
-#define CARD_P_M_S   62
-#define CARD_D_NEW_DSP_COMBIFILE 63
-typedef struct CARD_FILES_DATA
-{
-	char *Name;
-	unsigned char  Type;
-}
-	CARD_FILES_DATA;
-typedef struct CARD_FILES
-{
-	unsigned char  Boot;
-	unsigned char  Dsp[CARD_DSP_CNT];
-	unsigned char  DspTelindus;
-	unsigned char  Prot[CARD_PROT_CNT];
-}
-	CARD_FILES;
-#if CARDTYPE_H_WANT_DATA
-#if CARDTYPE_H_WANT_FILE_DATA
-CARD_FILES_DATA CardFData[] = {
-// Filename   Filetype
-	0,     CARD_FT_UNKNOWN,
-	"didnload.bin",  CARD_FT_B,
-	"diprload.bin",  CARD_FT_B,
-	"didiva.bin",  CARD_FT_D,
-	"didivapp.bin",  CARD_FT_D,
-	"dihscx.bin",  CARD_FT_D,
-	"div110.bin",  CARD_FT_D,
-	"dimodem.bin",  CARD_FT_D,
-	"difax.bin",  CARD_FT_D,
-	"di_etsi.bin",  CARD_FT_S,
-	"di_1tr6.bin",  CARD_FT_S,
-	"di_belg.bin",  CARD_FT_S,
-	"di_franc.bin",  CARD_FT_S,
-	"di_atel.bin",  CARD_FT_S,
-	"di_ni.bin",  CARD_FT_S,
-	"di_5ess.bin",  CARD_FT_S,
-	"di_japan.bin",  CARD_FT_S,
-	"di_etsi.sx",  CARD_FT_S,
-	"di_1tr6.sx",  CARD_FT_S,
-	"di_belg.sx",  CARD_FT_S,
-	"di_franc.sx",  CARD_FT_S,
-	"di_atel.sx",  CARD_FT_S,
-	"di_ni.sx",   CARD_FT_S,
-	"di_5ess.sx",  CARD_FT_S,
-	"di_japan.sx",  CARD_FT_S,
-	"di_etsi.sy",  CARD_FT_S,
-	"di_1tr6.sy",  CARD_FT_S,
-	"di_belg.sy",  CARD_FT_S,
-	"di_franc.sy",  CARD_FT_S,
-	"di_atel.sy",  CARD_FT_S,
-	"di_ni.sy",   CARD_FT_S,
-	"di_5ess.sy",  CARD_FT_S,
-	"di_japan.sy",  CARD_FT_S,
-	"di_etsi.sq",  CARD_FT_S,
-	"di_1tr6.sq",  CARD_FT_S,
-	"di_belg.sq",  CARD_FT_S,
-	"di_franc.sq",  CARD_FT_S,
-	"di_atel.sq",  CARD_FT_S,
-	"di_ni.sq",   CARD_FT_S,
-	"di_5ess.sq",  CARD_FT_S,
-	"di_japan.sq",  CARD_FT_S,
-	"di_etsi.p",  CARD_FT_S,
-	"di_1tr6.p",  CARD_FT_S,
-	"di_belg.p",  CARD_FT_S,
-	"di_franc.p",  CARD_FT_S,
-	"di_atel.p",  CARD_FT_S,
-	"di_ni.p",   CARD_FT_S,
-	"di_5ess.p",  CARD_FT_S,
-	"di_japan.p",  CARD_FT_S,
-	"di_etsi.sm",  CARD_FT_M,
-	"di_1tr6.sm",  CARD_FT_M,
-	"di_belg.sm",  CARD_FT_M,
-	"di_franc.sm",  CARD_FT_M,
-	"di_atel.sm",  CARD_FT_M,
-	"di_ni.sm",   CARD_FT_M,
-	"di_5ess.sm",  CARD_FT_M,
-	"di_japan.sm",  CARD_FT_M,
-	"di_swed.bin",  CARD_FT_S,
-	"di_swed.sx",  CARD_FT_S,
-	"di_swed.sy",  CARD_FT_S,
-	"di_swed.sq",  CARD_FT_S,
-	"di_swed.p",  CARD_FT_S,
-	"di_swed.sm",  CARD_FT_M,
-	"didspdld.bin",     CARD_FT_NEW_DSP_COMBIFILE
-};
-CARD_FILES CardFiles[] =
-{
-	{ /* CARD_UNKNOWN */
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	},
-	{ /* CARD_DIVA */
-		CARD_FILE_NONE,
-		CARD_D_K1, CARD_D_H, CARD_D_V, CARD_FILE_NONE, CARD_D_F,
-		CARD_D_NEW_DSP_COMBIFILE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	},
-	{ /* CARD_PRO  */
-		CARD_FILE_NONE,
-		CARD_D_K2, CARD_D_H, CARD_D_V, CARD_D_M, CARD_D_F,
-		CARD_D_NEW_DSP_COMBIFILE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	},
-	{ /* CARD_PICO */
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	},
-	{ /* CARD_S    */
-		CARD_B_S,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_S_E, CARD_P_S_1, CARD_P_S_B, CARD_P_S_F,
-		CARD_P_S_A, CARD_P_S_N, CARD_P_S_5, CARD_P_S_J,
-		CARD_P_S_S
-	},
-	{ /* CARD_SX   */
-		CARD_B_S,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_SX_E, CARD_P_SX_1, CARD_P_SX_B, CARD_P_SX_F,
-		CARD_P_SX_A, CARD_P_SX_N, CARD_P_SX_5, CARD_P_SX_J,
-		CARD_P_SX_S
-	},
-	{ /* CARD_SXN  */
-		CARD_B_S,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_SY_E, CARD_P_SY_1, CARD_P_SY_B, CARD_P_SY_F,
-		CARD_P_SY_A, CARD_P_SY_N, CARD_P_SY_5, CARD_P_SY_J,
-		CARD_P_SY_S
-	},
-	{ /* CARD_SCOM */
-		CARD_B_S,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_SY_E, CARD_P_SY_1, CARD_P_SY_B, CARD_P_SY_F,
-		CARD_P_SY_A, CARD_P_SY_N, CARD_P_SY_5, CARD_P_SY_J,
-		CARD_P_SY_S
-	},
-	{ /* CARD_QUAD */
-		CARD_B_S,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_SQ_E, CARD_P_SQ_1, CARD_P_SQ_B, CARD_P_SQ_F,
-		CARD_P_SQ_A, CARD_P_SQ_N, CARD_P_SQ_5, CARD_P_SQ_J,
-		CARD_P_SQ_S
-	},
-	{ /* CARD_PR   */
-		CARD_B_P,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_P_P_E, CARD_P_P_1, CARD_P_P_B, CARD_P_P_F,
-		CARD_P_P_A, CARD_P_P_N, CARD_P_P_5, CARD_P_P_J,
-		CARD_P_P_S
-	},
-	{ /* CARD_MAE  */
-		CARD_FILE_NONE,
-		CARD_D_K2, CARD_D_H, CARD_D_V, CARD_D_M, CARD_D_F,
-		CARD_D_NEW_DSP_COMBIFILE,
-		CARD_P_M_E, CARD_P_M_1, CARD_P_M_B, CARD_P_M_F,
-		CARD_P_M_A, CARD_P_M_N, CARD_P_M_5, CARD_P_M_J,
-		CARD_P_M_S
-	},
-	{ /* CARD_MAEQ */  /* currently not supported */
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	},
-	{ /* CARD_MAEP */  /* currently not supported */
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
-		CARD_FILE_NONE
-	}
-};
-#endif /*CARDTYPE_H_WANT_FILE_DATA*/
-#else /*!CARDTYPE_H_WANT_DATA*/
-extern CARD_FILES_DATA  CardFData[];
-extern CARD_FILES   CardFiles[];
-#endif /*CARDTYPE_H_WANT_DATA*/
-#endif /* _CARDTYPE_H_ */
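The CardFiles[]/CardFData[] tables deleted above fit together as a firmware
lookup: CardFiles[] is indexed by the card code (CARD_DIVA, CARD_PRO, ... as
noted in its initializer comments) and stores CARD_* constants that in turn
index CardFData[], whose Name member is the file to download.  A minimal
sketch of that lookup, assuming a valid card index; the helper name
card_protocol_filename() is hypothetical:

static const char *card_protocol_filename(int card, int protocol)
{
	unsigned char idx;

	/* Prot[] holds one entry per supported D-channel protocol */
	if (protocol < 0 || protocol >= CARD_PROT_CNT)
		return NULL;

	idx = CardFiles[card].Prot[protocol];
	if (idx == CARD_FILE_NONE)
		return NULL;		/* no protocol file for this card type */

	/* e.g. the CARD_S entry, protocol 0, yields CARD_P_S_E -> "di_etsi.bin" */
	return CardFData[idx].Name;
}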
diff --git a/drivers/isdn/hardware/eicon/cp_vers.h b/drivers/isdn/hardware/eicon/cp_vers.h
deleted file mode 100644
index c97230c..0000000
--- a/drivers/isdn/hardware/eicon/cp_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_capi_common_code_build[] = "102-28";
diff --git a/drivers/isdn/hardware/eicon/dadapter.c b/drivers/isdn/hardware/eicon/dadapter.c
deleted file mode 100644
index 5142099..0000000
--- a/drivers/isdn/hardware/eicon/dadapter.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "pc.h"
-#include "debuglib.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "dadapter.h"
-/* --------------------------------------------------------------------------
-   Adapter array change notification framework
-   -------------------------------------------------------------------------- */
-typedef struct _didd_adapter_change_notification {
-	didd_adapter_change_callback_t callback;
-	void IDI_CALL_ENTITY_T *context;
-} didd_adapter_change_notification_t,				\
-	* IDI_CALL_ENTITY_T pdidd_adapter_change_notification_t;
-#define DIVA_DIDD_MAX_NOTIFICATIONS 256
-static didd_adapter_change_notification_t	\
-NotificationTable[DIVA_DIDD_MAX_NOTIFICATIONS];
-/* --------------------------------------------------------------------------
-   Array to hold adapter information
-   -------------------------------------------------------------------------- */
-static DESCRIPTOR  HandleTable[NEW_MAX_DESCRIPTORS];
-static dword Adapters = 0; /* Number of adapters */
-/* --------------------------------------------------------------------------
-   Shadow IDI_DIMAINT
-   and 'shadow' debug stuff
-   -------------------------------------------------------------------------- */
-static void no_printf(unsigned char *format, ...)
-{
-#ifdef EBUG
-	va_list ap;
-	va_start(ap, format);
-	debug((format, ap));
-	va_end(ap);
-#endif
-}
-
-/* -------------------------------------------------------------------------
-   Portable debug Library
-   ------------------------------------------------------------------------- */
-#include "debuglib.c"
-
-static DESCRIPTOR  MAdapter =  {IDI_DIMAINT, /* Adapter Type */
-				0x00,     /* Channels */
-				0x0000,    /* Features */
-				(IDI_CALL)no_printf};
-/* --------------------------------------------------------------------------
-   DAdapter. Only IDI clients with a buffer that is large enough to
-   hold all descriptors will receive information about the DAdapter
-   { byte type, byte channels, word features, IDI_CALL request }
-   -------------------------------------------------------------------------- */
-static void IDI_CALL_LINK_T diva_dadapter_request(ENTITY IDI_CALL_ENTITY_T *);
-static DESCRIPTOR  DAdapter =  {IDI_DADAPTER, /* Adapter Type */
-				0x00,     /* Channels */
-				0x0000,    /* Features */
-				diva_dadapter_request };
-/* --------------------------------------------------------------------------
-   LOCALS
-   -------------------------------------------------------------------------- */
-static dword diva_register_adapter_callback(\
-	didd_adapter_change_callback_t callback,
-	void IDI_CALL_ENTITY_T *context);
-static void diva_remove_adapter_callback(dword handle);
-static void diva_notify_adapter_change(DESCRIPTOR *d, int removal);
-static diva_os_spin_lock_t didd_spin;
-/* --------------------------------------------------------------------------
-   Should be called as first step, after driver init
-   -------------------------------------------------------------------------- */
-void diva_didd_load_time_init(void) {
-	memset(&HandleTable[0], 0x00, sizeof(HandleTable));
-	memset(&NotificationTable[0], 0x00, sizeof(NotificationTable));
-	diva_os_initialize_spin_lock(&didd_spin, "didd");
-}
-/* --------------------------------------------------------------------------
-   Should be called as last step, if driver does unload
-   -------------------------------------------------------------------------- */
-void diva_didd_load_time_finit(void) {
-	diva_os_destroy_spin_lock(&didd_spin, "didd");
-}
-/* --------------------------------------------------------------------------
-   Called in order to register a new adapter in the adapter array
-   returns adapter handle (> 0) on success
-   returns -1 on adapter array overflow
-   -------------------------------------------------------------------------- */
-static int diva_didd_add_descriptor(DESCRIPTOR *d) {
-	diva_os_spin_lock_magic_t      irql;
-	int i;
-	if (d->type == IDI_DIMAINT) {
-		if (d->request) {
-			MAdapter.request = d->request;
-			dprintf = (DIVA_DI_PRINTF)d->request;
-			diva_notify_adapter_change(&MAdapter, 0); /* Inserted */
-			DBG_TRC(("DIMAINT registered, dprintf=%08x", d->request))
-				} else {
-			DBG_TRC(("DIMAINT removed"))
-				diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
-			MAdapter.request = (IDI_CALL)no_printf;
-			dprintf = no_printf;
-		}
-		return (NEW_MAX_DESCRIPTORS);
-	}
-	for (i = 0; i < NEW_MAX_DESCRIPTORS; i++) {
-		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_add");
-		if (HandleTable[i].type == 0) {
-			memcpy(&HandleTable[i], d, sizeof(*d));
-			Adapters++;
-			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
-			diva_notify_adapter_change(d, 0); /* we have new adapter */
-			DBG_TRC(("Add adapter[%d], request=%08x", (i + 1), d->request))
-				return (i + 1);
-		}
-		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
-	}
-	DBG_ERR(("Can't add adapter, out of resources"))
-		return (-1);
-}
-/* --------------------------------------------------------------------------
-   Called in order to remove one registered adapter from the array
-   returns 0 on success
-   returns -1 if the adapter was not found
-   -------------------------------------------------------------------------- */
-static int diva_didd_remove_descriptor(IDI_CALL request) {
-	diva_os_spin_lock_magic_t      irql;
-	int i;
-	if (request == MAdapter.request) {
-		DBG_TRC(("DIMAINT removed"))
-			dprintf = no_printf;
-		diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
-		MAdapter.request = (IDI_CALL)no_printf;
-		return (0);
-	}
-	for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) {
-		if (HandleTable[i].request == request) {
-			diva_notify_adapter_change(&HandleTable[i], 1); /* About to remove */
-			diva_os_enter_spin_lock(&didd_spin, &irql, "didd_rm");
-			memset(&HandleTable[i], 0x00, sizeof(HandleTable[0]));
-			Adapters--;
-			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_rm");
-			DBG_TRC(("Remove adapter[%d], request=%08x", (i + 1), request))
-				return (0);
-		}
-	}
-	DBG_ERR(("Invalid request=%08x, can't remove adapter", request))
-		return (-1);
-}
-/* --------------------------------------------------------------------------
-   Read adapter array
-   return 1 if not enough space to save all available adapters
-   -------------------------------------------------------------------------- */
-static int diva_didd_read_adapter_array(DESCRIPTOR *buffer, int length) {
-	diva_os_spin_lock_magic_t      irql;
-	int src, dst;
-	memset(buffer, 0x00, length);
-	length /= sizeof(DESCRIPTOR);
-	DBG_TRC(("DIDD_Read, space = %d, Adapters = %d", length, Adapters + 2))
-
-		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_read");
-	for (src = 0, dst = 0;
-	     (Adapters && (src < NEW_MAX_DESCRIPTORS) && (dst < length));
-	     src++) {
-		if (HandleTable[src].type) {
-			memcpy(&buffer[dst], &HandleTable[src], sizeof(DESCRIPTOR));
-			dst++;
-		}
-	}
-	diva_os_leave_spin_lock(&didd_spin, &irql, "didd_read");
-	if (dst < length) {
-		memcpy(&buffer[dst], &MAdapter, sizeof(DESCRIPTOR));
-		dst++;
-	} else {
-		DBG_ERR(("Can't write DIMAINT. Array too small"))
-			}
-	if (dst < length) {
-		memcpy(&buffer[dst], &DAdapter, sizeof(DESCRIPTOR));
-		dst++;
-	} else {
-		DBG_ERR(("Can't write DADAPTER. Array too small"))
-			}
-	DBG_TRC(("Read %d adapters", dst))
-		return (dst == length);
-}
-/* --------------------------------------------------------------------------
-   DAdapter request function.
-   This function processes only synchronous requests, and is used
-   for the reception/registration of new interfaces
-   -------------------------------------------------------------------------- */
-static void IDI_CALL_LINK_T diva_dadapter_request(	\
-	ENTITY IDI_CALL_ENTITY_T *e) {
-	IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
-	if (e->Req) { /* We do not process it, so return an error */
-		e->Rc = OUT_OF_RESOURCES;
-		DBG_ERR(("Can't process async request, Req=%02x", e->Req))
-			return;
-	}
-	/*
-	  So, we process sync request
-	*/
-	switch (e->Rc) {
-	case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: {
-		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
-		pinfo->handle = diva_register_adapter_callback(		\
-			(didd_adapter_change_callback_t)pinfo->callback,
-			(void IDI_CALL_ENTITY_T *)pinfo->context);
-		e->Rc = 0xff;
-	} break;
-	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY: {
-		diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
-		diva_remove_adapter_callback(pinfo->handle);
-		e->Rc = 0xff;
-	} break;
-	case IDI_SYNC_REQ_DIDD_ADD_ADAPTER: {
-		diva_didd_add_adapter_t *pinfo = &syncReq->didd_add_adapter.info;
-		if (diva_didd_add_descriptor((DESCRIPTOR *)pinfo->descriptor) < 0) {
-			e->Rc = OUT_OF_RESOURCES;
-		} else {
-			e->Rc = 0xff;
-		}
-	} break;
-	case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER: {
-		diva_didd_remove_adapter_t *pinfo = &syncReq->didd_remove_adapter.info;
-		if (diva_didd_remove_descriptor((IDI_CALL)pinfo->p_request) < 0) {
-			e->Rc = OUT_OF_RESOURCES;
-		} else {
-			e->Rc = 0xff;
-		}
-	} break;
-	case IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY: {
-		diva_didd_read_adapter_array_t *pinfo =\
-			&syncReq->didd_read_adapter_array.info;
-		if (diva_didd_read_adapter_array((DESCRIPTOR *)pinfo->buffer,
-						  (int)pinfo->length)) {
-			e->Rc = OUT_OF_RESOURCES;
-		} else {
-			e->Rc = 0xff;
-		}
-	} break;
-	default:
-		DBG_ERR(("Can't process sync request, Req=%02x", e->Rc))
-			e->Rc = OUT_OF_RESOURCES;
-	}
-}
-/* --------------------------------------------------------------------------
-   Called when an IDI client registers its notification function
-   -------------------------------------------------------------------------- */
-static dword diva_register_adapter_callback(		\
-	didd_adapter_change_callback_t callback,
-	void IDI_CALL_ENTITY_T *context) {
-	diva_os_spin_lock_magic_t irql;
-	dword i;
-
-	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
-		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_add");
-		if (!NotificationTable[i].callback) {
-			NotificationTable[i].callback = callback;
-			NotificationTable[i].context = context;
-			diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
-			DBG_TRC(("Register adapter notification[%d]=%08x", i + 1, callback))
-				return (i + 1);
-		}
-		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
-	}
-	DBG_ERR(("Can't register adapter notification, overflow"))
-		return (0);
-}
-/* --------------------------------------------------------------------------
-   Called when an IDI client removes its notification function
-   -------------------------------------------------------------------------- */
-static void diva_remove_adapter_callback(dword handle) {
-	diva_os_spin_lock_magic_t irql;
-	if (handle && ((--handle) < DIVA_DIDD_MAX_NOTIFICATIONS)) {
-		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
-		NotificationTable[handle].callback = NULL;
-		NotificationTable[handle].context  = NULL;
-		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
-		DBG_TRC(("Remove adapter notification[%d]", (int)(handle + 1)))
-			return;
-	}
-	DBG_ERR(("Can't remove adapter notification, handle=%d", handle))
-		}
-/* --------------------------------------------------------------------------
-   Notify all clients about an adapter array change
-   Assumes the following behavior on the client side:
-   Step 1: Register Notification
-   Step 2: Read Adapter Array
-   -------------------------------------------------------------------------- */
-static void diva_notify_adapter_change(DESCRIPTOR *d, int removal) {
-	int i, do_notify;
-	didd_adapter_change_notification_t nfy;
-	diva_os_spin_lock_magic_t irql;
-	for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
-		do_notify = 0;
-		diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy");
-		if (NotificationTable[i].callback) {
-			memcpy(&nfy, &NotificationTable[i], sizeof(nfy));
-			do_notify = 1;
-		}
-		diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy");
-		if (do_notify) {
-			(*(nfy.callback))(nfy.context, d, removal);
-		}
-	}
-}
-/* --------------------------------------------------------------------------
-   For all systems that are linked by the Kernel Mode Linker, this is the
-   ONLY function that should be exported by this device driver
-   IDI clients should look for IDI_DADAPTER, and use request function
-   of this adapter (sync request) in order to receive appropriate services:
-   - add new adapter
-   - remove existing adapter
-   - add adapter array notification
-   - remove adapter array notification
-   (read adapter is redundant in this case)
-   INPUT:
-   buffer - pointer to buffer that will receive adapter array
-   length  - length (in bytes) of space in buffer
-   OUTPUT:
-   Adapter array will be written to memory described by 'buffer'
-   If the last adapter seen in the returned adapter array is
-   IDI_DADAPTER, or if the last adapter in the array has type '0', then
-   there was enough space in the buffer to accommodate all available
-   adapter descriptors
-   *NOTE 1 (debug interface):
-   The IDI adapter of type 'IDI_DIMAINT' registers the well-known
-   'dprintf' function (of type DI_PRINTF, see include/debuglib.c and
-   include/debuglib.h for details) as its 'request' entry.
-   So dprintf is not exported from the debug module directly;
-   instead, IDI_DIMAINT is registered.
-   The module load order in this case is:
-   1. DIDD (this file)
-   2. DIMAINT loads and registers 'IDI_DIMAINT'; at this step
-   DIDD should be able to get 'dprintf', save it, and
-   register with DIDD by means of the 'dprintf' function.
-   3. Any other driver is loaded and is able to access the adapter array
-   and the debug interface.
-   This approach allows the debug interface to be loaded/unloaded on
-   demand, and saves memory if necessary.
-   -------------------------------------------------------------------------- */
-void IDI_CALL_LINK_T DIVA_DIDD_Read(void IDI_CALL_ENTITY_T *buffer,
-				    int length) {
-	diva_didd_read_adapter_array(buffer, length);
-}
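The block comment above DIVA_DIDD_Read() spells out the client-side protocol:
read the adapter array, locate the IDI_DADAPTER descriptor, and issue
synchronous requests against its request entry.  A minimal sketch of that
sequence for registering an adapter-change callback; the helper name is
hypothetical and the didd_notify.Req/.Rc/.info member layout is assumed to
follow the same IDI_SYNC_REQ pattern used by diva_get_idi_adapter_info() in
debug.c below:

static dword example_register_didd_notify(didd_adapter_change_callback_t cb,
					  void *context)
{
	/* +2 leaves room for the IDI_DIMAINT and IDI_DADAPTER entries that
	   diva_didd_read_adapter_array() appends after the real adapters */
	DESCRIPTOR table[NEW_MAX_DESCRIPTORS + 2];
	IDI_SYNC_REQ sync_req;
	int i;

	DIVA_DIDD_Read(table, sizeof(table));

	for (i = 0; i < NEW_MAX_DESCRIPTORS + 2; i++) {
		if (table[i].type == IDI_DADAPTER) {
			sync_req.didd_notify.Req = 0;
			sync_req.didd_notify.Rc =
				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
			sync_req.didd_notify.info.callback = (void *)cb;
			sync_req.didd_notify.info.context  = context;
			(*table[i].request)((ENTITY *)&sync_req);
			/* handle is 0 if the notification table overflowed */
			return (sync_req.didd_notify.info.handle);
		}
	}
	return (0);
}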
diff --git a/drivers/isdn/hardware/eicon/dadapter.h b/drivers/isdn/hardware/eicon/dadapter.h
deleted file mode 100644
index 5540f46..0000000
--- a/drivers/isdn/hardware/eicon/dadapter.h
+++ /dev/null
@@ -1,34 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DIDD_DADAPTER_INC__
-#define __DIVA_DIDD_DADAPTER_INC__
-
-void diva_didd_load_time_init(void);
-void diva_didd_load_time_finit(void);
-
-#define NEW_MAX_DESCRIPTORS     64
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
deleted file mode 100644
index 3017881..0000000
--- a/drivers/isdn/hardware/eicon/debug.c
+++ /dev/null
@@ -1,2128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "platform.h"
-#include "pc.h"
-#include "di_defs.h"
-#include "debug_if.h"
-#include "divasync.h"
-#include "kst_ifc.h"
-#include "maintidi.h"
-#include "man_defs.h"
-
-/*
-  LOCALS
-*/
-#define DBG_MAGIC (0x47114711L)
-
-static void DI_register(void *arg);
-static void DI_deregister(pDbgHandle hDbg);
-static void DI_format(int do_lock, word id, int type, char *format, va_list argument_list);
-static void DI_format_locked(word id, int type, char *format, va_list argument_list);
-static void DI_format_old(word id, char *format, va_list ap) { }
-static void DiProcessEventLog(unsigned short id, unsigned long msgID, va_list ap) { }
-static void single_p(byte *P, word *PLength, byte Id);
-static void diva_maint_xdi_cb(ENTITY *e);
-static word SuperTraceCreateReadReq(byte *P, const char *path);
-static int diva_mnt_cmp_nmbr(const char *nmbr);
-static void diva_free_dma_descriptor(IDI_CALL request, int nr);
-static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic);
-void diva_mnt_internal_dprintf(dword drv_id, dword type, char *p, ...);
-
-static dword MaxDumpSize = 256;
-static dword MaxXlogSize = 2 + 128;
-static char  TraceFilter[DIVA_MAX_SELECTIVE_FILTER_LENGTH + 1];
-static int TraceFilterIdent   = -1;
-static int TraceFilterChannel = -1;
-
-typedef struct _diva_maint_client {
-	dword       sec;
-	dword       usec;
-	pDbgHandle  hDbg;
-	char        drvName[128];
-	dword       dbgMask;
-	dword       last_dbgMask;
-	IDI_CALL    request;
-	_DbgHandle_ Dbg;
-	int         logical;
-	int         channels;
-	diva_strace_library_interface_t *pIdiLib;
-	BUFFERS     XData;
-	char        xbuffer[2048 + 512];
-	byte        *pmem;
-	int         request_pending;
-	int         dma_handle;
-} diva_maint_client_t;
-static diva_maint_client_t clients[MAX_DESCRIPTORS];
-
-static void diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask);
-
-static void diva_maint_error(void *user_context,
-			     diva_strace_library_interface_t *hLib,
-			     int Adapter,
-			     int error,
-			     const char *file,
-			     int line);
-static void diva_maint_state_change_notify(void *user_context,
-					   diva_strace_library_interface_t *hLib,
-					   int Adapter,
-					   diva_trace_line_state_t *channel,
-					   int notify_subject);
-static void diva_maint_trace_notify(void *user_context,
-				    diva_strace_library_interface_t *hLib,
-				    int Adapter,
-				    void *xlog_buffer,
-				    int length);
-
-
-
-typedef struct MSG_QUEUE {
-	dword	Size;		/* total size of queue (constant)	*/
-	byte	*Base;		/* lowest address (constant)		*/
-	byte	*High;		/* Base + Size (constant)		*/
-	byte	*Head;		/* first message in queue (if any)	*/
-	byte	*Tail;		/* first free position			*/
-	byte	*Wrap;		/* current wraparound position		*/
-	dword	Count;		/* current no of bytes in queue		*/
-} MSG_QUEUE;
-
-typedef struct MSG_HEAD {
-	volatile dword	Size;		/* size of data following MSG_HEAD	*/
-#define	MSG_INCOMPLETE	0x8000	/* ored to Size until queueCompleteMsg	*/
-} MSG_HEAD;
-
-#define queueCompleteMsg(p) do { ((MSG_HEAD *)p - 1)->Size &= ~MSG_INCOMPLETE; } while (0)
-#define queueCount(q)	((q)->Count)
-#define MSG_NEED(size)							\
-	((sizeof(MSG_HEAD) + size + sizeof(dword) - 1) & ~(sizeof(dword) - 1))
-
-static void queueInit(MSG_QUEUE *Q, byte *Buffer, dword sizeBuffer) {
-	Q->Size = sizeBuffer;
-	Q->Base = Q->Head = Q->Tail = Buffer;
-	Q->High = Buffer + sizeBuffer;
-	Q->Wrap = NULL;
-	Q->Count = 0;
-}
-
-static byte *queueAllocMsg(MSG_QUEUE *Q, word size) {
-	/* Allocate 'size' bytes at tail of queue which will be filled later
-	 * directly with the caller's own message header info and/or message.
-	 * An allocated message is marked incomplete by OR-ing the 'Size' field
-	 * with MSG_INCOMPLETE.
-	 * This must be reset via queueCompleteMsg() after the message is filled.
-	 * As long as a message is marked incomplete queuePeekMsg() will return
-	 * a 'queue empty' condition when it reaches such a message.  */
-
-	MSG_HEAD *Msg;
-	word need = MSG_NEED(size);
-
-	if (Q->Tail == Q->Head) {
-		if (Q->Wrap || need > Q->Size) {
-			return NULL; /* full */
-		}
-		goto alloc; /* empty */
-	}
-
-	if (Q->Tail > Q->Head) {
-		if (Q->Tail + need <= Q->High) goto alloc; /* append */
-		if (Q->Base + need > Q->Head) {
-			return NULL; /* too much */
-		}
-		/* wraparound the queue (but not the message) */
-		Q->Wrap = Q->Tail;
-		Q->Tail = Q->Base;
-		goto alloc;
-	}
-
-	if (Q->Tail + need > Q->Head) {
-		return NULL; /* too much */
-	}
-
-alloc:
-	Msg = (MSG_HEAD *)Q->Tail;
-
-	Msg->Size = size | MSG_INCOMPLETE;
-
-	Q->Tail	 += need;
-	Q->Count += size;
-
-
-
-	return ((byte *)(Msg + 1));
-}
-
-static void queueFreeMsg(MSG_QUEUE *Q) {
-/* Free the message at head of queue */
-
-	word size = ((MSG_HEAD *)Q->Head)->Size & ~MSG_INCOMPLETE;
-
-	Q->Head  += MSG_NEED(size);
-	Q->Count -= size;
-
-	if (Q->Wrap) {
-		if (Q->Head >= Q->Wrap) {
-			Q->Head = Q->Base;
-			Q->Wrap = NULL;
-		}
-	} else if (Q->Head >= Q->Tail) {
-		Q->Head = Q->Tail = Q->Base;
-	}
-}
-
-static byte *queuePeekMsg(MSG_QUEUE *Q, word *size) {
-	/* Show the first valid message in the queue BUT DON'T free the message.
-	 * After looking at the message contents it can be freed via queueFreeMsg()
-	 * or simply remain in the message queue.  */
-
-	MSG_HEAD *Msg = (MSG_HEAD *)Q->Head;
-
-	if (((byte *)Msg == Q->Tail && !Q->Wrap) ||
-	    (Msg->Size & MSG_INCOMPLETE)) {
-		return NULL;
-	} else {
-		*size = Msg->Size;
-		return ((byte *)(Msg + 1));
-	}
-}
-
-/*
-  Message queue header
-*/
-static MSG_QUEUE *dbg_queue;
-static byte *dbg_base;
-static int                 external_dbg_queue;
-static diva_os_spin_lock_t dbg_q_lock;
-static diva_os_spin_lock_t dbg_adapter_lock;
-static int                 dbg_q_busy;
-static volatile dword      dbg_sequence;
-
-/*
-  INTERFACE:
-  Initialize run time queue structures.
-  base:    base of the message queue
-  length:  length of the message queue
-  do_init: perform queue reset
-
-  return:  zero on success, -1 on error
-*/
-int diva_maint_init(byte *base, unsigned long length, int do_init) {
-	if (dbg_queue || (!base) || (length < (4096 * 4))) {
-		return (-1);
-	}
-
-	TraceFilter[0]     =  0;
-	TraceFilterIdent   = -1;
-	TraceFilterChannel = -1;
-
-	dbg_base = base;
-
-	*(dword *)base  = (dword)DBG_MAGIC; /* Store Magic */
-	base   += sizeof(dword);
-	length -= sizeof(dword);
-
-	*(dword *)base = 2048; /* Extension Field Length */
-	base   += sizeof(dword);
-	length -= sizeof(dword);
-
-	strcpy(base, "KERNEL MODE BUFFER\n");
-	base   += 2048;
-	length -= 2048;
-
-	*(dword *)base = 0; /* Terminate extension */
-	base   += sizeof(dword);
-	length -= sizeof(dword);
-
-	*(void **)base  =  (void *)(base + sizeof(void *)); /* Store Base  */
-	base   += sizeof(void *);
-	length -= sizeof(void *);
-
-	dbg_queue = (MSG_QUEUE *)base;
-	queueInit(dbg_queue, base + sizeof(MSG_QUEUE), length - sizeof(MSG_QUEUE) - 512);
-	external_dbg_queue = 0;
-
-	if (!do_init) {
-		external_dbg_queue = 1; /* memory was located on the external device */
-	}
-
-
-	if (diva_os_initialize_spin_lock(&dbg_q_lock, "dbg_init")) {
-		dbg_queue = NULL;
-		dbg_base = NULL;
-		external_dbg_queue = 0;
-		return (-1);
-	}
-
-	if (diva_os_initialize_spin_lock(&dbg_adapter_lock, "dbg_init")) {
-		diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_init");
-		dbg_queue = NULL;
-		dbg_base = NULL;
-		external_dbg_queue = 0;
-		return (-1);
-	}
-
-	return (0);
-}
-
-/*
-  INTERFACE:
-  Finit at unload time
-  return address of internal queue or zero if queue
-  was external
-*/
-void *diva_maint_finit(void) {
-	void *ret = (void *)dbg_base;
-	int i;
-
-	dbg_queue = NULL;
-	dbg_base  = NULL;
-
-	if (ret) {
-		diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_finit");
-		diva_os_destroy_spin_lock(&dbg_adapter_lock, "dbg_finit");
-	}
-
-	if (external_dbg_queue) {
-		ret = NULL;
-	}
-	external_dbg_queue = 0;
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].pmem) {
-			diva_os_free(0, clients[i].pmem);
-		}
-	}
-
-	return (ret);
-}
-
-/*
-  INTERFACE:
-  Return the number of messages in the debug queue
-*/
-dword diva_dbg_q_length(void) {
-	return (dbg_queue ? queueCount(dbg_queue)	: 0);
-}
-
-/*
-  INTERFACE:
-  Lock message queue and return the pointer to the first
-  entry.
-*/
-diva_dbg_entry_head_t *diva_maint_get_message(word *size,
-					      diva_os_spin_lock_magic_t *old_irql) {
-	diva_dbg_entry_head_t *pmsg = NULL;
-
-	diva_os_enter_spin_lock(&dbg_q_lock, old_irql, "read");
-	if (dbg_q_busy) {
-		diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_busy");
-		return NULL;
-	}
-	dbg_q_busy = 1;
-
-	if (!(pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, size))) {
-		dbg_q_busy = 0;
-		diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_empty");
-	}
-
-	return (pmsg);
-}
-
-/*
-  INTERFACE:
-  acknowledge last message and unlock queue
-*/
-void diva_maint_ack_message(int do_release,
-			    diva_os_spin_lock_magic_t *old_irql) {
-	if (!dbg_q_busy) {
-		return;
-	}
-	if (do_release) {
-		queueFreeMsg(dbg_queue);
-	}
-	dbg_q_busy = 0;
-	diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_ack");
-}
-
-
-/*
-  INTERFACE:
-  PRT COMP function used to register
-  with the MAINT adapter, or to log in compatibility
-  mode in case an older driver version is connected as well
-*/
-void diva_maint_prtComp(char *format, ...) {
-	void    *hDbg;
-	va_list ap;
-
-	if (!format)
-		return;
-
-	va_start(ap, format);
-
-	/*
-	  register to new log driver functions
-	*/
-	if ((format[0] == 0) && ((unsigned char)format[1] == 255)) {
-		hDbg = va_arg(ap, void *); /* ptr to DbgHandle */
-		DI_register(hDbg);
-	}
-
-	va_end(ap);
-}
-
-static void DI_register(void *arg) {
-	diva_os_spin_lock_magic_t old_irql;
-	dword sec, usec;
-	pDbgHandle	hDbg;
-	int id, free_id = -1, best_id = 0;
-
-	diva_os_get_time(&sec, &usec);
-
-	hDbg = (pDbgHandle)arg;
-	/*
-	  Check for bad args, especially for the old obsolete debug handle
-	*/
-	if ((hDbg == NULL) ||
-	    ((hDbg->id == 0) && (((_OldDbgHandle_ *)hDbg)->id == -1)) ||
-	    (hDbg->Registered != 0)) {
-		return;
-	}
-
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");
-
-	for (id = 1; id < ARRAY_SIZE(clients); id++) {
-		if (clients[id].hDbg == hDbg) {
-			/*
-			  driver already registered
-			*/
-			diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-			return;
-		}
-		if (clients[id].hDbg) { /* slot is busy */
-			continue;
-		}
-		free_id = id;
-		if (!strcmp(clients[id].drvName, hDbg->drvName)) {
-			/*
-			  This driver was already registered with this name
-			  and slot is still free - reuse it
-			*/
-			best_id = 1;
-			break;
-		}
-		if (!clients[id].hDbg) { /* slot is free */
-			break;
-		}
-	}
-
-	if (free_id != -1) {
-		diva_dbg_entry_head_t *pmsg = NULL;
-		int len;
-		char tmp[256];
-		word size;
-
-		/*
-		  Register new driver with id == free_id
-		*/
-		clients[free_id].hDbg = hDbg;
-		clients[free_id].sec  = sec;
-		clients[free_id].usec = usec;
-		strcpy(clients[free_id].drvName, hDbg->drvName);
-
-		clients[free_id].dbgMask = hDbg->dbgMask;
-		if (best_id) {
-			hDbg->dbgMask |= clients[free_id].last_dbgMask;
-		} else {
-			clients[free_id].last_dbgMask = 0;
-		}
-
-		hDbg->Registered = DBG_HANDLE_REG_NEW;
-		hDbg->id         = (byte)free_id;
-		hDbg->dbg_end    = DI_deregister;
-		hDbg->dbg_prt    = DI_format_locked;
-		hDbg->dbg_ev     = DiProcessEventLog;
-		hDbg->dbg_irq    = DI_format_locked;
-		if (hDbg->Version > 0) {
-			hDbg->dbg_old  = DI_format_old;
-		}
-		hDbg->next       = (pDbgHandle)DBG_MAGIC;
-
-		/*
-		  Log driver registration, MAINT driver ID is '0'
-		*/
-		len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
-			      free_id, hDbg->drvName);
-
-		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-								       (word)(len + 1 + sizeof(*pmsg))))) {
-			if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-				queueFreeMsg(dbg_queue);
-			} else {
-				break;
-			}
-		}
-
-		if (pmsg) {
-			pmsg->sequence    = dbg_sequence++;
-			pmsg->time_sec    = sec;
-			pmsg->time_usec   = usec;
-			pmsg->facility    = MSG_TYPE_STRING;
-			pmsg->dli         = DLI_REG;
-			pmsg->drv_id      = 0; /* id 0 - DIMAINT */
-			pmsg->di_cpu      = 0;
-			pmsg->data_length = len + 1;
-
-			memcpy(&pmsg[1], tmp, len + 1);
-			queueCompleteMsg(pmsg);
-			diva_maint_wakeup_read();
-		}
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-}
-
-static void DI_deregister(pDbgHandle hDbg) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	dword sec, usec;
-	int i;
-	word size;
-	byte *pmem = NULL;
-
-	diva_os_get_time(&sec, &usec);
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].hDbg == hDbg) {
-			diva_dbg_entry_head_t *pmsg;
-			char tmp[256];
-			int len;
-
-			clients[i].hDbg = NULL;
-
-			hDbg->id       = -1;
-			hDbg->dbgMask  = 0;
-			hDbg->dbg_end  = NULL;
-			hDbg->dbg_prt  = NULL;
-			hDbg->dbg_irq  = NULL;
-			if (hDbg->Version > 0)
-				hDbg->dbg_old = NULL;
-			hDbg->Registered = 0;
-			hDbg->next     = NULL;
-
-			if (clients[i].pIdiLib) {
-				(*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
-				clients[i].pIdiLib = NULL;
-
-				pmem = clients[i].pmem;
-				clients[i].pmem = NULL;
-			}
-
-			/*
-			  Log driver de-registration, MAINT driver ID is '0'
-			*/
-			len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
-				      i, hDbg->drvName);
-
-			while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-									      (word)(len + 1 + sizeof(*pmsg))))) {
-				if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-					queueFreeMsg(dbg_queue);
-				} else {
-					break;
-				}
-			}
-
-			if (pmsg) {
-				pmsg->sequence    = dbg_sequence++;
-				pmsg->time_sec    = sec;
-				pmsg->time_usec   = usec;
-				pmsg->facility    = MSG_TYPE_STRING;
-				pmsg->dli         = DLI_REG;
-				pmsg->drv_id      = 0; /* id 0 - DIMAINT */
-				pmsg->di_cpu      = 0;
-				pmsg->data_length = len + 1;
-
-				memcpy(&pmsg[1], tmp, len + 1);
-				queueCompleteMsg(pmsg);
-				diva_maint_wakeup_read();
-			}
-
-			break;
-		}
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");
-
-	if (pmem) {
-		diva_os_free(0, pmem);
-	}
-}
-
-static void DI_format_locked(unsigned short id,
-			     int type,
-			     char *format,
-			     va_list argument_list) {
-	DI_format(1, id, type, format, argument_list);
-}
-
-static void DI_format(int do_lock,
-		      unsigned short id,
-		      int type,
-		      char *format,
-		      va_list ap) {
-	diva_os_spin_lock_magic_t old_irql;
-	dword sec, usec;
-	diva_dbg_entry_head_t *pmsg = NULL;
-	dword length;
-	word size;
-	static char fmtBuf[MSG_FRAME_MAX_SIZE + sizeof(*pmsg) + 1];
-	char          *data;
-	unsigned short code;
-
-	if (diva_os_in_irq()) {
-		dbg_sequence++;
-		return;
-	}
-
-	if ((!format) ||
-	    ((TraceFilter[0] != 0) && ((TraceFilterIdent < 0) || (TraceFilterChannel < 0)))) {
-		return;
-	}
-
-
-
-	diva_os_get_time(&sec, &usec);
-
-	if (do_lock) {
-		diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "format");
-	}
-
-	switch (type) {
-	case DLI_MXLOG:
-	case DLI_BLK:
-	case DLI_SEND:
-	case DLI_RECV:
-		if (!(length = va_arg(ap, unsigned long))) {
-			break;
-		}
-		if (length > MaxDumpSize) {
-			length = MaxDumpSize;
-		}
-		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-								       (word)length + sizeof(*pmsg)))) {
-			if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-				queueFreeMsg(dbg_queue);
-			} else {
-				break;
-			}
-		}
-		if (pmsg) {
-			memcpy(&pmsg[1], format, length);
-			pmsg->sequence    = dbg_sequence++;
-			pmsg->time_sec    = sec;
-			pmsg->time_usec   = usec;
-			pmsg->facility    = MSG_TYPE_BINARY;
-			pmsg->dli         = type; /* DLI_XXX */
-			pmsg->drv_id      = id;   /* driver MAINT id */
-			pmsg->di_cpu      = 0;
-			pmsg->data_length = length;
-			queueCompleteMsg(pmsg);
-		}
-		break;
-
-	case DLI_XLOG: {
-		byte *p;
-		data    = va_arg(ap, char *);
-		code    = (unsigned short)va_arg(ap, unsigned int);
-		length	= (unsigned long)va_arg(ap, unsigned int);
-
-		if (length > MaxXlogSize)
-			length = MaxXlogSize;
-
-		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-								      (word)length + sizeof(*pmsg) + 2))) {
-			if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-				queueFreeMsg(dbg_queue);
-			} else {
-				break;
-			}
-		}
-		if (pmsg) {
-			p = (byte *)&pmsg[1];
-			p[0] = (char)(code);
-			p[1] = (char)(code >> 8);
-			if (data && length) {
-				memcpy(&p[2], &data[0], length);
-			}
-			length += 2;
-
-			pmsg->sequence    = dbg_sequence++;
-			pmsg->time_sec    = sec;
-			pmsg->time_usec   = usec;
-			pmsg->facility    = MSG_TYPE_BINARY;
-			pmsg->dli         = type; /* DLI_XXX */
-			pmsg->drv_id      = id;   /* driver MAINT id */
-			pmsg->di_cpu      = 0;
-			pmsg->data_length = length;
-			queueCompleteMsg(pmsg);
-		}
-	} break;
-
-	case DLI_LOG:
-	case DLI_FTL:
-	case DLI_ERR:
-	case DLI_TRC:
-	case DLI_REG:
-	case DLI_MEM:
-	case DLI_SPL:
-	case DLI_IRP:
-	case DLI_TIM:
-	case DLI_TAPI:
-	case DLI_NDIS:
-	case DLI_CONN:
-	case DLI_STAT:
-	case DLI_PRV0:
-	case DLI_PRV1:
-	case DLI_PRV2:
-	case DLI_PRV3:
-		if ((length = (unsigned long)vsprintf(&fmtBuf[0], format, ap)) > 0) {
-			length += (sizeof(*pmsg) + 1);
-
-			while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-									       (word)length))) {
-				if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-					queueFreeMsg(dbg_queue);
-				} else {
-					break;
-				}
-			}
-
-			pmsg->sequence    = dbg_sequence++;
-			pmsg->time_sec    = sec;
-			pmsg->time_usec   = usec;
-			pmsg->facility    = MSG_TYPE_STRING;
-			pmsg->dli         = type; /* DLI_XXX */
-			pmsg->drv_id      = id;   /* driver MAINT id */
-			pmsg->di_cpu      = 0;
-			pmsg->data_length = length - sizeof(*pmsg);
-
-			memcpy(&pmsg[1], fmtBuf, pmsg->data_length);
-			queueCompleteMsg(pmsg);
-		}
-		break;
-
-	} /* switch type */
-
-
-	if (queueCount(dbg_queue)) {
-		diva_maint_wakeup_read();
-	}
-
-	if (do_lock) {
-		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "format");
-	}
-}
-
-/*
-  Write driver ID and driver revision to callers buffer
-*/
-int diva_get_driver_info(dword id, byte *data, int data_length) {
-	diva_os_spin_lock_magic_t old_irql;
-	byte *p = data;
-	int to_copy;
-
-	if (!data || !id || (data_length < 17) ||
-	    (id >= ARRAY_SIZE(clients))) {
-		return (-1);
-	}
-
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
-	if (clients[id].hDbg) {
-		*p++ = 1;
-		*p++ = (byte)clients[id].sec; /* save seconds */
-		*p++ = (byte)(clients[id].sec >>  8);
-		*p++ = (byte)(clients[id].sec >> 16);
-		*p++ = (byte)(clients[id].sec >> 24);
-
-		*p++ = (byte)(clients[id].usec / 1000); /* save mseconds */
-		*p++ = (byte)((clients[id].usec / 1000) >>  8);
-		*p++ = (byte)((clients[id].usec / 1000) >> 16);
-		*p++ = (byte)((clients[id].usec / 1000) >> 24);
-
-		data_length -= 9;
-
-		if ((to_copy = min(strlen(clients[id].drvName), (size_t)(data_length - 1)))) {
-			memcpy(p, clients[id].drvName, to_copy);
-			p += to_copy;
-			data_length -= to_copy;
-			if ((data_length >= 4) && clients[id].hDbg->drvTag[0]) {
-				*p++ = '(';
-				data_length -= 1;
-				if ((to_copy = min(strlen(clients[id].hDbg->drvTag), (size_t)(data_length - 2)))) {
-					memcpy(p, clients[id].hDbg->drvTag, to_copy);
-					p += to_copy;
-					data_length -= to_copy;
-					if (data_length >= 2) {
-						*p++ = ')';
-						data_length--;
-					}
-				}
-			}
-		}
-	}
-	*p++ = 0;
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
-	return (p - data);
-}
-
-int diva_get_driver_dbg_mask(dword id, byte *data) {
-	diva_os_spin_lock_magic_t old_irql;
-	int ret = -1;
-
-	if (!data || !id || (id >= ARRAY_SIZE(clients))) {
-		return (-1);
-	}
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
-	if (clients[id].hDbg) {
-		ret = 4;
-		*data++ = (byte)(clients[id].hDbg->dbgMask);
-		*data++ = (byte)(clients[id].hDbg->dbgMask >>  8);
-		*data++ = (byte)(clients[id].hDbg->dbgMask >> 16);
-		*data++ = (byte)(clients[id].hDbg->dbgMask >> 24);
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
-	return (ret);
-}
-
-int diva_set_driver_dbg_mask(dword id, dword mask) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	int ret = -1;
-
-
-	if (!id || (id >= ARRAY_SIZE(clients))) {
-		return (-1);
-	}
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");
-
-	if (clients[id].hDbg) {
-		dword old_mask = clients[id].hDbg->dbgMask;
-		mask &= 0x7fffffff;
-		clients[id].hDbg->dbgMask = mask;
-		clients[id].last_dbgMask = (clients[id].hDbg->dbgMask | clients[id].dbgMask);
-		ret = 4;
-		diva_change_management_debug_mask(&clients[id], old_mask);
-	}
-
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");
-
-	if (clients[id].request_pending) {
-		clients[id].request_pending = 0;
-		(*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
-	}
-
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-
-	return (ret);
-}
-
-static int diva_get_idi_adapter_info(IDI_CALL request, dword *serial, dword *logical) {
-	IDI_SYNC_REQ sync_req;
-
-	sync_req.xdi_logical_adapter_number.Req = 0;
-	sync_req.xdi_logical_adapter_number.Rc = IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER;
-	(*request)((ENTITY *)&sync_req);
-	*logical = sync_req.xdi_logical_adapter_number.info.logical_adapter_number;
-
-	sync_req.GetSerial.Req = 0;
-	sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
-	sync_req.GetSerial.serial = 0;
-	(*request)((ENTITY *)&sync_req);
-	*serial = sync_req.GetSerial.serial;
-
-	return (0);
-}
-
-/*
-  Register XDI adapter as MAINT compatible driver
-*/
-void diva_mnt_add_xdi_adapter(const DESCRIPTOR *d) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	dword sec, usec, logical, serial, org_mask;
-	int id, free_id = -1;
-	char tmp[128];
-	diva_dbg_entry_head_t *pmsg = NULL;
-	int len;
-	word size;
-	byte *pmem;
-
-	diva_os_get_time(&sec, &usec);
-	diva_get_idi_adapter_info(d->request, &serial, &logical);
-	if (serial & 0xff000000) {
-		sprintf(tmp, "ADAPTER:%d SN:%u-%d",
-			(int)logical,
-			serial & 0x00ffffff,
-			(byte)(((serial & 0xff000000) >> 24) + 1));
-	} else {
-		sprintf(tmp, "ADAPTER:%d SN:%u", (int)logical, serial);
-	}
-
-	if (!(pmem = diva_os_malloc(0, DivaSTraceGetMemotyRequirement(d->channels)))) {
-		return;
-	}
-	memset(pmem, 0x00, DivaSTraceGetMemotyRequirement(d->channels));
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");
-
-	for (id = 1; id < ARRAY_SIZE(clients); id++) {
-		if (clients[id].hDbg && (clients[id].request == d->request)) {
-			diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-			diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-			diva_os_free(0, pmem);
-			return;
-		}
-		if (clients[id].hDbg) { /* slot is busy */
-			continue;
-		}
-		if (free_id < 0) {
-			free_id = id;
-		}
-		if (!strcmp(clients[id].drvName, tmp)) {
-			/*
-			  This driver was already registered with this name
-			  and slot is still free - reuse it
-			*/
-			free_id = id;
-			break;
-		}
-	}
-
-	if (free_id < 0) {
-		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-		diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-		diva_os_free(0, pmem);
-		return;
-	}
-
-	id = free_id;
-	clients[id].request  = d->request;
-	clients[id].request_pending = 0;
-	clients[id].hDbg     = &clients[id].Dbg;
-	clients[id].sec      = sec;
-	clients[id].usec     = usec;
-	strcpy(clients[id].drvName,     tmp);
-	strcpy(clients[id].Dbg.drvName, tmp);
-	clients[id].Dbg.drvTag[0] = 0;
-	clients[id].logical  = (int)logical;
-	clients[id].channels = (int)d->channels;
-	clients[id].dma_handle = -1;
-
-	clients[id].Dbg.dbgMask    = 0;
-	clients[id].dbgMask        = clients[id].Dbg.dbgMask;
-	if (id) {
-		clients[id].Dbg.dbgMask |= clients[free_id].last_dbgMask;
-	} else {
-		clients[id].last_dbgMask = 0;
-	}
-	clients[id].Dbg.Registered = DBG_HANDLE_REG_NEW;
-	clients[id].Dbg.id         = (byte)id;
-	clients[id].Dbg.dbg_end    = DI_deregister;
-	clients[id].Dbg.dbg_prt    = DI_format_locked;
-	clients[id].Dbg.dbg_ev     = DiProcessEventLog;
-	clients[id].Dbg.dbg_irq    = DI_format_locked;
-	clients[id].Dbg.next       = (pDbgHandle)DBG_MAGIC;
-
-	{
-		diva_trace_library_user_interface_t diva_maint_user_ifc = { &clients[id],
-									    diva_maint_state_change_notify,
-									    diva_maint_trace_notify,
-									    diva_maint_error };
-
-		/*
-		  Attach to adapter management interface
-		*/
-		if ((clients[id].pIdiLib =
-		     DivaSTraceLibraryCreateInstance((int)logical, &diva_maint_user_ifc, pmem))) {
-			if (((*(clients[id].pIdiLib->DivaSTraceLibraryStart))(clients[id].pIdiLib->hLib))) {
-				diva_mnt_internal_dprintf(0, DLI_ERR, "Adapter(%d) Start failed", (int)logical);
-				(*(clients[id].pIdiLib->DivaSTraceLibraryFinit))(clients[id].pIdiLib->hLib);
-				clients[id].pIdiLib = NULL;
-			}
-		} else {
-			diva_mnt_internal_dprintf(0, DLI_ERR, "A(%d) management init failed", (int)logical);
-		}
-	}
-
-	if (!clients[id].pIdiLib) {
-		clients[id].request = NULL;
-		clients[id].request_pending = 0;
-		clients[id].hDbg    = NULL;
-		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-		diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-		diva_os_free(0, pmem);
-		return;
-	}
-
-	/*
-	  Log driver register, MAINT driver ID is '0'
-	*/
-	len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
-		      id, clients[id].Dbg.drvName);
-
-	while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-							       (word)(len + 1 + sizeof(*pmsg))))) {
-		if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-			queueFreeMsg(dbg_queue);
-		} else {
-			break;
-		}
-	}
-
-	if (pmsg) {
-		pmsg->sequence    = dbg_sequence++;
-		pmsg->time_sec    = sec;
-		pmsg->time_usec   = usec;
-		pmsg->facility    = MSG_TYPE_STRING;
-		pmsg->dli         = DLI_REG;
-		pmsg->drv_id      = 0; /* id 0 - DIMAINT */
-		pmsg->di_cpu      = 0;
-		pmsg->data_length = len + 1;
-
-		memcpy(&pmsg[1], tmp, len + 1);
-		queueCompleteMsg(pmsg);
-		diva_maint_wakeup_read();
-	}
-
-	org_mask = clients[id].Dbg.dbgMask;
-	clients[id].Dbg.dbgMask = 0;
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-
-	if (clients[id].request_pending) {
-		clients[id].request_pending = 0;
-		(*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
-	}
-
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-
-	diva_set_driver_dbg_mask(id, org_mask);
-}
-
-/*
-  De-Register XDI adapter
-*/
-void diva_mnt_remove_xdi_adapter(const DESCRIPTOR *d) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	dword sec, usec;
-	int i;
-	word size;
-	byte *pmem = NULL;
-
-	diva_os_get_time(&sec, &usec);
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].hDbg && (clients[i].request == d->request)) {
-			diva_dbg_entry_head_t *pmsg;
-			char tmp[256];
-			int len;
-
-			if (clients[i].pIdiLib) {
-				(*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
-				clients[i].pIdiLib = NULL;
-
-				pmem = clients[i].pmem;
-				clients[i].pmem = NULL;
-			}
-
-			clients[i].hDbg    = NULL;
-			clients[i].request_pending = 0;
-			if (clients[i].dma_handle >= 0) {
-				/*
-				  Free DMA handle
-				*/
-				diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
-				clients[i].dma_handle = -1;
-			}
-			clients[i].request = NULL;
-
-			/*
-			  Log driver de-registration, MAINT driver ID is '0'
-			*/
-			len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
-				      i, clients[i].Dbg.drvName);
-
-			memset(&clients[i].Dbg, 0x00, sizeof(clients[i].Dbg));
-
-			while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-									       (word)(len + 1 + sizeof(*pmsg))))) {
-				if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-					queueFreeMsg(dbg_queue);
-				} else {
-					break;
-				}
-			}
-
-			if (pmsg) {
-				pmsg->sequence    = dbg_sequence++;
-				pmsg->time_sec    = sec;
-				pmsg->time_usec   = usec;
-				pmsg->facility    = MSG_TYPE_STRING;
-				pmsg->dli         = DLI_REG;
-				pmsg->drv_id      = 0; /* id 0 - DIMAINT */
-				pmsg->di_cpu      = 0;
-				pmsg->data_length = len + 1;
-
-				memcpy(&pmsg[1], tmp, len + 1);
-				queueCompleteMsg(pmsg);
-				diva_maint_wakeup_read();
-			}
-
-			break;
-		}
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");
-
-	if (pmem) {
-		diva_os_free(0, pmem);
-	}
-}
-
-/* ----------------------------------------------------------------
-   Low level interface for management interface client
-   ---------------------------------------------------------------- */
-/*
-  Return handle to client structure
-*/
-void *SuperTraceOpenAdapter(int AdapterNumber) {
-	int i;
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].hDbg && clients[i].request && (clients[i].logical == AdapterNumber)) {
-			return (&clients[i]);
-		}
-	}
-
-	return NULL;
-}
-
-int SuperTraceCloseAdapter(void *AdapterHandle) {
-	return (0);
-}
-
-int SuperTraceReadRequest(void *AdapterHandle, const char *name, byte *data) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-		byte *xdata = (byte *)&pC->xbuffer[0];
-		char tmp = 0;
-		word length;
-
-		if (!strcmp(name, "\\")) { /* Read ROOT */
-			name = &tmp;
-		}
-		length = SuperTraceCreateReadReq(xdata, name);
-		single_p(xdata, &length, 0); /* End Of Message */
-
-		e->Req        = MAN_READ;
-		e->ReqCh      = 0;
-		e->X->PLength = length;
-		e->X->P	= (byte *)xdata;
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-int SuperTraceGetNumberOfChannels(void *AdapterHandle) {
-	if (AdapterHandle) {
-		diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-		return (pC->channels);
-	}
-
-	return (0);
-}
-
-int SuperTraceASSIGN(void *AdapterHandle, byte *data) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-		IDI_SYNC_REQ *preq;
-		char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
-		char features[4];
-		word assign_data_length = 1;
-
-		features[0] = 0;
-		pC->xbuffer[0] = 0;
-		preq = (IDI_SYNC_REQ *)&buffer[0];
-		preq->xdi_extended_features.Req = 0;
-		preq->xdi_extended_features.Rc  = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
-		preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
-		preq->xdi_extended_features.info.features = &features[0];
-
-		(*(pC->request))((ENTITY *)preq);
-
-		if ((features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) &&
-		    (features[0] & DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA)) {
-			dword uninitialized_var(rx_dma_magic);
-			if ((pC->dma_handle = diva_get_dma_descriptor(pC->request, &rx_dma_magic)) >= 0) {
-				pC->xbuffer[0] = LLI;
-				pC->xbuffer[1] = 8;
-				pC->xbuffer[2] = 0x40;
-				pC->xbuffer[3] = (byte)pC->dma_handle;
-				pC->xbuffer[4] = (byte)rx_dma_magic;
-				pC->xbuffer[5] = (byte)(rx_dma_magic >>  8);
-				pC->xbuffer[6] = (byte)(rx_dma_magic >> 16);
-				pC->xbuffer[7] = (byte)(rx_dma_magic >> 24);
-				pC->xbuffer[8] = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE & 0xFF);
-				pC->xbuffer[9] = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE >> 8);
-				pC->xbuffer[10] = 0;
-
-				assign_data_length = 11;
-			}
-		} else {
-			pC->dma_handle = -1;
-		}
-
-		e->Id          = MAN_ID;
-		e->callback    = diva_maint_xdi_cb;
-		e->XNum        = 1;
-		e->X           = &pC->XData;
-		e->Req         = ASSIGN;
-		e->ReqCh       = 0;
-		e->X->PLength  = assign_data_length;
-		e->X->P        = (byte *)&pC->xbuffer[0];
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-int SuperTraceREMOVE(void *AdapterHandle) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-
-		e->XNum        = 1;
-		e->X           = &pC->XData;
-		e->Req         = REMOVE;
-		e->ReqCh       = 0;
-		e->X->PLength  = 1;
-		e->X->P        = (byte *)&pC->xbuffer[0];
-		pC->xbuffer[0] = 0;
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-int SuperTraceTraceOnRequest(void *hAdapter, const char *name, byte *data) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)hAdapter;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-		byte *xdata = (byte *)&pC->xbuffer[0];
-		char tmp = 0;
-		word length;
-
-		if (!strcmp(name, "\\")) { /* Read ROOT */
-			name = &tmp;
-		}
-		length = SuperTraceCreateReadReq(xdata, name);
-		single_p(xdata, &length, 0); /* End Of Message */
-		e->Req          = MAN_EVENT_ON;
-		e->ReqCh        = 0;
-		e->X->PLength   = length;
-		e->X->P = (byte *)xdata;
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-int SuperTraceWriteVar(void *AdapterHandle,
-		       byte *data,
-		       const char *name,
-		       void *var,
-		       byte type,
-		       byte var_length) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-		diva_man_var_header_t *pVar = (diva_man_var_header_t *)&pC->xbuffer[0];
-		word length = SuperTraceCreateReadReq((byte *)pVar, name);
-
-		memcpy(&pC->xbuffer[length], var, var_length);
-		length += var_length;
-		pVar->length += var_length;
-		pVar->value_length = var_length;
-		pVar->type = type;
-		single_p((byte *)pVar, &length, 0); /* End Of Message */
-
-		e->Req = MAN_WRITE;
-		e->ReqCh = 0;
-		e->X->PLength   = length;
-		e->X->P = (byte *)pVar;
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-int SuperTraceExecuteRequest(void *AdapterHandle,
-			     const char *name,
-			     byte *data) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
-	if (pC && pC->pIdiLib && pC->request) {
-		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-		byte *xdata = (byte *)&pC->xbuffer[0];
-		word length;
-
-		length = SuperTraceCreateReadReq(xdata, name);
-		single_p(xdata, &length, 0); /* End Of Message */
-
-		e->Req = MAN_EXECUTE;
-		e->ReqCh = 0;
-		e->X->PLength = length;
-		e->X->P = (byte *)xdata;
-
-		pC->request_pending = 1;
-
-		return (0);
-	}
-
-	return (-1);
-}
-
-static word SuperTraceCreateReadReq(byte *P, const char *path) {
-	byte var_length;
-	byte *plen;
-
-	var_length = (byte)strlen(path);
-
-	*P++ = ESC;
-	plen = P++;
-	*P++ = 0x80; /* MAN_IE */
-	*P++ = 0x00; /* Type */
-	*P++ = 0x00; /* Attribute */
-	*P++ = 0x00; /* Status */
-	*P++ = 0x00; /* Variable Length */
-	*P++ = var_length;
-	memcpy(P, path, var_length);
-	P += var_length;
-	*plen = var_length + 0x06;
-
-	return ((word)(var_length + 0x08));
-}
-
-static void single_p(byte *P, word *PLength, byte Id) {
-	P[(*PLength)++] = Id;
-}
-
-static void diva_maint_xdi_cb(ENTITY *e) {
-	diva_strace_context_t *pLib = DIVAS_CONTAINING_RECORD(e, diva_strace_context_t, e);
-	diva_maint_client_t *pC;
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb");
-
-	pC = (diva_maint_client_t *)pLib->hAdapter;
-
-	if ((e->complete == 255) || (pC->dma_handle < 0)) {
-		if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
-			diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal library error");
-		}
-	} else {
-		/*
-		  Process combined management interface indication
-		*/
-		if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
-			diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal library error (DMA mode)");
-		}
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb");
-
-
-	if (pC->request_pending) {
-		pC->request_pending = 0;
-		(*(pC->request))(e);
-	}
-
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb");
-}
-
-
-static void diva_maint_error(void *user_context,
-			     diva_strace_library_interface_t *hLib,
-			     int Adapter,
-			     int error,
-			     const char *file,
-			     int line) {
-	diva_mnt_internal_dprintf(0, DLI_ERR,
-				  "Trace library error(%d) A(%d) %s %d", error, Adapter, file, line);
-}
-
-static void print_ie(diva_trace_ie_t *ie, char *buffer, int length) {
-	int i;
-
-	buffer[0] = 0;
-
-	if (length > 32) {
-		for (i = 0; ((i < ie->length) && (length > 3)); i++) {
-			sprintf(buffer, "%02x", ie->data[i]);
-			buffer += 2;
-			length -= 2;
-			if (i < (ie->length - 1)) {
-				strcpy(buffer, " ");
-				buffer++;
-				length--;
-			}
-		}
-	}
-}
-
-static void diva_maint_state_change_notify(void *user_context,
-					   diva_strace_library_interface_t *hLib,
-					   int Adapter,
-					   diva_trace_line_state_t *channel,
-					   int notify_subject) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)user_context;
-	diva_trace_fax_state_t *fax = &channel->fax;
-	diva_trace_modem_state_t *modem = &channel->modem;
-	char tmp[256];
-
-	if (!pC->hDbg) {
-		return;
-	}
-
-	switch (notify_subject) {
-	case DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE: {
-		int view = (TraceFilter[0] == 0);
-		/*
-		  Process selective Trace
-		*/
-		if (channel->Line[0] == 'I' && channel->Line[1] == 'd' &&
-		    channel->Line[2] == 'l' && channel->Line[3] == 'e') {
-			if ((TraceFilterIdent == pC->hDbg->id) && (TraceFilterChannel == (int)channel->ChannelNumber)) {
-				(*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 0);
-				(*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 0);
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace OFF for Ch=%d",
-							  (int)channel->ChannelNumber);
-				TraceFilterIdent   = -1;
-				TraceFilterChannel = -1;
-				view = 1;
-			}
-		} else if (TraceFilter[0] && (TraceFilterIdent < 0) && !(diva_mnt_cmp_nmbr(&channel->RemoteAddress[0]) &&
-									 diva_mnt_cmp_nmbr(&channel->LocalAddress[0]))) {
-
-			if ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0) { /* Activate B-channel trace */
-				(*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 1);
-			}
-			if ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0) { /* Activate AudioTap Trace */
-				(*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 1);
-			}
-
-			TraceFilterIdent   = pC->hDbg->id;
-			TraceFilterChannel = (int)channel->ChannelNumber;
-
-			if (TraceFilterIdent >= 0) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace ON for Ch=%d",
-							  (int)channel->ChannelNumber);
-				view = 1;
-			}
-		}
-		if (view && (pC->hDbg->dbgMask & DIVA_MGT_DBG_LINE_EVENTS)) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Ch     = %d",
-						  (int)channel->ChannelNumber);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Status = <%s>", &channel->Line[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer1 = <%s>", &channel->Framing[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer2 = <%s>", &channel->Layer2[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer3 = <%s>", &channel->Layer3[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RAddr  = <%s>",
-						  &channel->RemoteAddress[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RSAddr = <%s>",
-						  &channel->RemoteSubAddress[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LAddr  = <%s>",
-						  &channel->LocalAddress[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LSAddr = <%s>",
-						  &channel->LocalSubAddress[0]);
-			print_ie(&channel->call_BC, tmp, sizeof(tmp));
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L BC     = <%s>", tmp);
-			print_ie(&channel->call_HLC, tmp, sizeof(tmp));
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L HLC    = <%s>", tmp);
-			print_ie(&channel->call_LLC, tmp, sizeof(tmp));
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LLC    = <%s>", tmp);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L CR     = 0x%x", channel->CallReference);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Disc   = 0x%x",
-						  channel->LastDisconnecCause);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Owner  = <%s>", &channel->UserID[0]);
-		}
-
-	} break;
-
-	case DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE:
-		if (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_PROGRESS) {
-			{
-				int ch = TraceFilterChannel;
-				int id = TraceFilterIdent;
-
-				if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
-				    (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
-					if (ch != (int)modem->ChannelNumber) {
-						break;
-					}
-				} else if (TraceFilter[0] != 0) {
-					break;
-				}
-			}
-
-
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Ch    = %lu",
-						  (int)modem->ChannelNumber);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Event = %lu", modem->Event);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Norm  = %lu", modem->Norm);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Opts. = 0x%08x", modem->Options);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Tx    = %lu Bps", modem->TxSpeed);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rx    = %lu Bps", modem->RxSpeed);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RT    = %lu mSec",
-						  modem->RoundtripMsec);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Sr    = %lu", modem->SymbolRate);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rxl   = %d dBm", modem->RxLeveldBm);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM El    = %d dBm", modem->EchoLeveldBm);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM SNR   = %lu dB", modem->SNRdb);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM MAE   = %lu", modem->MAE);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRet  = %lu",
-						  modem->LocalRetrains);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRet  = %lu",
-						  modem->RemoteRetrains);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRes  = %lu", modem->LocalResyncs);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRes  = %lu",
-						  modem->RemoteResyncs);
-			if (modem->Event == 3) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Disc  =  %lu", modem->DiscReason);
-			}
-		}
-		if ((modem->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_STATISTICS)) {
-			(*(pC->pIdiLib->DivaSTraceGetModemStatistics))(pC->pIdiLib);
-		}
-		break;
-
-	case DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE:
-		if (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_PROGRESS) {
-			{
-				int ch = TraceFilterChannel;
-				int id = TraceFilterIdent;
-
-				if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
-				    (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
-					if (ch != (int)fax->ChannelNumber) {
-						break;
-					}
-				} else if (TraceFilter[0] != 0) {
-					break;
-				}
-			}
-
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Ch    = %lu", (int)fax->ChannelNumber);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Event = %lu",     fax->Event);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pages = %lu",     fax->Page_Counter);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Feat. = 0x%08x",  fax->Features);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX ID    = <%s>",    &fax->Station_ID[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Saddr = <%s>",    &fax->Subaddress[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pwd   = <%s>",    &fax->Password[0]);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Speed = %lu",     fax->Speed);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Res.  = 0x%08x",  fax->Resolution);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Width = %lu",     fax->Paper_Width);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Length= %lu",     fax->Paper_Length);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX SLT   = %lu",     fax->Scanline_Time);
-			if (fax->Event == 3) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Disc  = %lu",     fax->Disc_Reason);
-			}
-		}
-		if ((fax->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_STATISTICS)) {
-			(*(pC->pIdiLib->DivaSTraceGetFaxStatistics))(pC->pIdiLib);
-		}
-		break;
-
-	case DIVA_SUPER_TRACE_INTERFACE_CHANGE:
-		if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_EVENTS) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT,
-						  "Layer 1 -> [%s]", channel->pInterface->Layer1);
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT,
-						  "Layer 2 -> [%s]", channel->pInterface->Layer2);
-		}
-		break;
-
-	case DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE:
-		if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_STATISTICS) {
-			/*
-			  Incoming Statistics
-			*/
-			if (channel->pInterfaceStat->inc.Calls) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Calls                     =%lu", channel->pInterfaceStat->inc.Calls);
-			}
-			if (channel->pInterfaceStat->inc.Connected) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Connected                 =%lu", channel->pInterfaceStat->inc.Connected);
-			}
-			if (channel->pInterfaceStat->inc.User_Busy) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Busy                      =%lu", channel->pInterfaceStat->inc.User_Busy);
-			}
-			if (channel->pInterfaceStat->inc.Call_Rejected) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Rejected                  =%lu", channel->pInterfaceStat->inc.Call_Rejected);
-			}
-			if (channel->pInterfaceStat->inc.Wrong_Number) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Wrong Nr                  =%lu", channel->pInterfaceStat->inc.Wrong_Number);
-			}
-			if (channel->pInterfaceStat->inc.Incompatible_Dst) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Incomp. Dest              =%lu", channel->pInterfaceStat->inc.Incompatible_Dst);
-			}
-			if (channel->pInterfaceStat->inc.Out_of_Order) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Out of Order              =%lu", channel->pInterfaceStat->inc.Out_of_Order);
-			}
-			if (channel->pInterfaceStat->inc.Ignored) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Inc Ignored                   =%lu", channel->pInterfaceStat->inc.Ignored);
-			}
-
-			/*
-			  Outgoing Statistics
-			*/
-			if (channel->pInterfaceStat->outg.Calls) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Calls                    =%lu", channel->pInterfaceStat->outg.Calls);
-			}
-			if (channel->pInterfaceStat->outg.Connected) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Connected                =%lu", channel->pInterfaceStat->outg.Connected);
-			}
-			if (channel->pInterfaceStat->outg.User_Busy) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Busy                     =%lu", channel->pInterfaceStat->outg.User_Busy);
-			}
-			if (channel->pInterfaceStat->outg.No_Answer) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg No Answer                =%lu", channel->pInterfaceStat->outg.No_Answer);
-			}
-			if (channel->pInterfaceStat->outg.Wrong_Number) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Wrong Nr                 =%lu", channel->pInterfaceStat->outg.Wrong_Number);
-			}
-			if (channel->pInterfaceStat->outg.Call_Rejected) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Rejected                 =%lu", channel->pInterfaceStat->outg.Call_Rejected);
-			}
-			if (channel->pInterfaceStat->outg.Other_Failures) {
-				diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-							  "Outg Other Failures           =%lu", channel->pInterfaceStat->outg.Other_Failures);
-			}
-		}
-		break;
-
-	case DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE:
-		if (channel->pInterfaceStat->mdm.Disc_Normal) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Normal        = %lu", channel->pInterfaceStat->mdm.Disc_Normal);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Unspecified) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Unsp.         = %lu", channel->pInterfaceStat->mdm.Disc_Unspecified);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Busy_Tone) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Busy Tone     = %lu", channel->pInterfaceStat->mdm.Disc_Busy_Tone);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Congestion) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Congestion    = %lu", channel->pInterfaceStat->mdm.Disc_Congestion);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Carr_Wait) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Carrier Wait  = %lu", channel->pInterfaceStat->mdm.Disc_Carr_Wait);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Trn_Timeout) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Trn. T.o.     = %lu", channel->pInterfaceStat->mdm.Disc_Trn_Timeout);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Incompat) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Incompatible  = %lu", channel->pInterfaceStat->mdm.Disc_Incompat);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_Frame_Rej) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc Frame Reject  = %lu", channel->pInterfaceStat->mdm.Disc_Frame_Rej);
-		}
-		if (channel->pInterfaceStat->mdm.Disc_V42bis) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "MDM Disc V.42bis       = %lu", channel->pInterfaceStat->mdm.Disc_V42bis);
-		}
-		break;
-
-	case DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE:
-		if (channel->pInterfaceStat->fax.Disc_Normal) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Normal        = %lu", channel->pInterfaceStat->fax.Disc_Normal);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Not_Ident) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Not Ident.    = %lu", channel->pInterfaceStat->fax.Disc_Not_Ident);
-		}
-		if (channel->pInterfaceStat->fax.Disc_No_Response) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc No Response   = %lu", channel->pInterfaceStat->fax.Disc_No_Response);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Retries) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Max Retries   = %lu", channel->pInterfaceStat->fax.Disc_Retries);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Unexp_Msg) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Unexp. Msg.        = %lu", channel->pInterfaceStat->fax.Disc_Unexp_Msg);
-		}
-		if (channel->pInterfaceStat->fax.Disc_No_Polling) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc No Polling    = %lu", channel->pInterfaceStat->fax.Disc_No_Polling);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Training) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Training      = %lu", channel->pInterfaceStat->fax.Disc_Training);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Unexpected) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Unexpected    = %lu", channel->pInterfaceStat->fax.Disc_Unexpected);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Application) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Application   = %lu", channel->pInterfaceStat->fax.Disc_Application);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Incompat) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Incompatible  = %lu", channel->pInterfaceStat->fax.Disc_Incompat);
-		}
-		if (channel->pInterfaceStat->fax.Disc_No_Command) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc No Command    = %lu", channel->pInterfaceStat->fax.Disc_No_Command);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Long_Msg) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Long Msg.     = %lu", channel->pInterfaceStat->fax.Disc_Long_Msg);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Supervisor) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Supervisor    = %lu", channel->pInterfaceStat->fax.Disc_Supervisor);
-		}
-		if (channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc SUP SEP PWD   = %lu", channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Invalid_Msg) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Invalid Msg.  = %lu", channel->pInterfaceStat->fax.Disc_Invalid_Msg);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Page_Coding) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Page Coding   = %lu", channel->pInterfaceStat->fax.Disc_Page_Coding);
-		}
-		if (channel->pInterfaceStat->fax.Disc_App_Timeout) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Appl. T.o.    = %lu", channel->pInterfaceStat->fax.Disc_App_Timeout);
-		}
-		if (channel->pInterfaceStat->fax.Disc_Unspecified) {
-			diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
-						  "FAX Disc Unspec.       = %lu", channel->pInterfaceStat->fax.Disc_Unspecified);
-		}
-		break;
-	}
-}
-
-/*
-  Receive trace information from the Management Interface and store it in the
-  internal trace buffer with MSG_TYPE_MLOG as is, without any filtering.
-  Event filtering and formatting are done in the Management Interface itself.
-*/
-static void diva_maint_trace_notify(void *user_context,
-				    diva_strace_library_interface_t *hLib,
-				    int Adapter,
-				    void *xlog_buffer,
-				    int length) {
-	diva_maint_client_t *pC = (diva_maint_client_t *)user_context;
-	diva_dbg_entry_head_t *pmsg;
-	word size;
-	dword sec, usec;
-	int ch = TraceFilterChannel;
-	int id = TraceFilterIdent;
-
-	/*
-	  Selective trace
-	*/
-	if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
-	    (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
-		const char *p = NULL;
-		int ch_value = -1;
-		MI_XLOG_HDR *TrcData = (MI_XLOG_HDR *)xlog_buffer;
-
-		if (Adapter != clients[id].logical) {
-			return; /* Ignore all trace messages from other adapters */
-		}
-
-		if (TrcData->code == 24) {
-			p = (char *)&TrcData->code;
-			p += 2;
-		}
-
-		/*
-		  All L1 messages start with [dsp,ch], so we can use this information
-		  to filter out all messages that use a different channel
-		*/
-		if (p && p[0] == '[') {
-			if (p[2] == ',') {
-				p += 3;
-				ch_value = *p - '0';
-			} else if (p[3] == ',') {
-				p += 4;
-				ch_value = *p - '0';
-			}
-			if (ch_value >= 0) {
-				if (p[2] == ']') {
-					ch_value = ch_value * 10 + p[1] - '0';
-				}
-				if (ch_value != ch) {
-					return; /* Ignore other channels */
-				}
-			}
-		}
-
-	} else if (TraceFilter[0] != 0) {
-		return; /* Ignore trace if trace filter is activated, but idle */
-	}
-
-	diva_os_get_time(&sec, &usec);
-
-	while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
-							       (word)length + sizeof(*pmsg)))) {
-		if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
-			queueFreeMsg(dbg_queue);
-		} else {
-			break;
-		}
-	}
-	if (pmsg) {
-		memcpy(&pmsg[1], xlog_buffer, length);
-		pmsg->sequence    = dbg_sequence++;
-		pmsg->time_sec    = sec;
-		pmsg->time_usec   = usec;
-		pmsg->facility    = MSG_TYPE_MLOG;
-		pmsg->dli         = pC->logical;
-		pmsg->drv_id      = pC->hDbg->id;
-		pmsg->di_cpu      = 0;
-		pmsg->data_length = length;
-		queueCompleteMsg(pmsg);
-		if (queueCount(dbg_queue)) {
-			diva_maint_wakeup_read();
-		}
-	}
-}
-
-
-/*
-  Convert the MAINT trace mask to the management interface trace mask/work/facility
-  and issue the command to the management interface
-*/
-static void diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask) {
-	if (pC->request && pC->hDbg && pC->pIdiLib) {
-		dword changed = pC->hDbg->dbgMask ^ old_mask;
-
-		if (changed & DIVA_MGT_DBG_TRACE) {
-			(*(pC->pIdiLib->DivaSTraceSetInfo))(pC->pIdiLib,
-							    (pC->hDbg->dbgMask & DIVA_MGT_DBG_TRACE) != 0);
-		}
-		if (changed & DIVA_MGT_DBG_DCHAN) {
-			(*(pC->pIdiLib->DivaSTraceSetDChannel))(pC->pIdiLib,
-								(pC->hDbg->dbgMask & DIVA_MGT_DBG_DCHAN) != 0);
-		}
-		if (!TraceFilter[0]) {
-			if (changed & DIVA_MGT_DBG_IFC_BCHANNEL) {
-				int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0);
-
-				for (i = 0; i < pC->channels; i++) {
-					(*(pC->pIdiLib->DivaSTraceSetBChannel))(pC->pIdiLib, i + 1, state);
-				}
-			}
-			if (changed & DIVA_MGT_DBG_IFC_AUDIO) {
-				int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0);
-
-				for (i = 0; i < pC->channels; i++) {
-					(*(pC->pIdiLib->DivaSTraceSetAudioTap))(pC->pIdiLib, i + 1, state);
-				}
-			}
-		}
-	}
-}
-
-
-void diva_mnt_internal_dprintf(dword drv_id, dword type, char *fmt, ...) {
-	va_list ap;
-
-	va_start(ap, fmt);
-	DI_format(0, (word)drv_id, (int)type, fmt, ap);
-	va_end(ap);
-}
-
-/*
-  Shutdown all adapters before driver removal
-*/
-int diva_mnt_shutdown_xdi_adapters(void) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	int i, fret = 0;
-	byte *pmem;
-
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		pmem = NULL;
-
-		diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "unload");
-		diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "unload");
-
-		if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) {
-			if ((*(clients[i].pIdiLib->DivaSTraceLibraryStop))(clients[i].pIdiLib) == 1) {
-				/*
-				  Adapter removal complete
-				*/
-				if (clients[i].pIdiLib) {
-					(*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
-					clients[i].pIdiLib = NULL;
-
-					pmem = clients[i].pmem;
-					clients[i].pmem = NULL;
-				}
-				clients[i].hDbg    = NULL;
-				clients[i].request_pending = 0;
-
-				if (clients[i].dma_handle >= 0) {
-					/*
-					  Free DMA handle
-					*/
-					diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
-					clients[i].dma_handle = -1;
-				}
-				clients[i].request = NULL;
-			} else {
-				fret = -1;
-			}
-		}
-
-		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "unload");
-		if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) {
-			clients[i].request_pending = 0;
-			(*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib));
-			if (clients[i].dma_handle >= 0) {
-				diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
-				clients[i].dma_handle = -1;
-			}
-		}
-		diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "unload");
-
-		if (pmem) {
-			diva_os_free(0, pmem);
-		}
-	}
-
-	return (fret);
-}
-
-/*
-  Set/Read the trace filter used for selective tracing.
-  Affects the B-channel and Audio Tap trace masks at run time
-*/
-int diva_set_trace_filter(int filter_length, const char *filter) {
-	diva_os_spin_lock_magic_t old_irql, old_irql1;
-	int i, ch, on, client_b_on, client_atap_on;
-
-	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
-
-	if (filter_length <= DIVA_MAX_SELECTIVE_FILTER_LENGTH) {
-		memcpy(&TraceFilter[0], filter, filter_length);
-		if (TraceFilter[filter_length]) {
-			TraceFilter[filter_length] = 0;
-		}
-		if (TraceFilter[0] == '*') {
-			TraceFilter[0] = 0;
-		}
-	} else {
-		filter_length = -1;
-	}
-
-	TraceFilterIdent   = -1;
-	TraceFilterChannel = -1;
-
-	on = (TraceFilter[0] == 0);
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) {
-			client_b_on    = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0);
-			client_atap_on = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO)    != 0);
-			for (ch = 0; ch < clients[i].channels; ch++) {
-				(*(clients[i].pIdiLib->DivaSTraceSetBChannel))(clients[i].pIdiLib->hLib, ch + 1, client_b_on);
-				(*(clients[i].pIdiLib->DivaSTraceSetAudioTap))(clients[i].pIdiLib->hLib, ch + 1, client_atap_on);
-			}
-		}
-	}
-
-	for (i = 1; i < ARRAY_SIZE(clients); i++) {
-		if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) {
-			diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
-			clients[i].request_pending = 0;
-			(*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib));
-			diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
-		}
-	}
-
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
-	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-
-	return (filter_length);
-}
-
-int diva_get_trace_filter(int max_length, char *filter) {
-	diva_os_spin_lock_magic_t old_irql;
-	int len;
-
-	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read_filter");
-	len = strlen(&TraceFilter[0]) + 1;
-	if (max_length >= len) {
-		memcpy(filter, &TraceFilter[0], len);
-	}
-	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_filter");
-
-	return (len);
-}
-
-static int diva_dbg_cmp_key(const char *ref, const char *key) {
-	while (*key && (*ref++ == *key++));
-	return (!*key && !*ref);
-}
-
-/*
-  If the trace filter starts with the character "C", then
-  all following characters are interpreted as a command.
-  The following commands are available:
-  - single: trace a single call at a time, independent of CPN/CiPN
-*/
-static int diva_mnt_cmp_nmbr(const char *nmbr) {
-	const char *ref = &TraceFilter[0];
-	int ref_len = strlen(&TraceFilter[0]), nmbr_len = strlen(nmbr);
-
-	if (ref[0] == 'C') {
-		if (diva_dbg_cmp_key(&ref[1], "single")) {
-			return (0);
-		}
-		return (-1);
-	}
-
-	if (!ref_len || (ref_len > nmbr_len)) {
-		return (-1);
-	}
-
-	nmbr = nmbr + nmbr_len - 1;
-	ref  = ref  + ref_len  - 1;
-
-	while (ref_len--) {
-		if (*nmbr-- != *ref--) {
-			return (-1);
-		}
-	}
-
-	return (0);
-}
-
-static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic) {
-	ENTITY e;
-	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
-	if (!request) {
-		return (-1);
-	}
-
-	pReq->xdi_dma_descriptor_operation.Req = 0;
-	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
-	pReq->xdi_dma_descriptor_operation.info.operation =     IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_number  = -1;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_magic   = 0;
-
-	(*request)((ENTITY *)pReq);
-
-	if (!pReq->xdi_dma_descriptor_operation.info.operation &&
-	    (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
-	    pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
-		*dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
-		return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
-	} else {
-		return (-1);
-	}
-}
-
-static void diva_free_dma_descriptor(IDI_CALL request, int nr) {
-	ENTITY e;
-	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
-	if (!request || (nr < 0)) {
-		return;
-	}
-
-	pReq->xdi_dma_descriptor_operation.Req = 0;
-	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
-	pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_number  = nr;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_magic   = 0;
-
-	(*request)((ENTITY *)pReq);
-}
diff --git a/drivers/isdn/hardware/eicon/debug_if.h b/drivers/isdn/hardware/eicon/debug_if.h
deleted file mode 100644
index fc5953a..0000000
--- a/drivers/isdn/hardware/eicon/debug_if.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- Copyright (c) Eicon Technology Corporation, 2000.
- *
- This source file is supplied for the use with Eicon
- Technology Corporation's range of DIVA Server Adapters.
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DEBUG_IF_H__
-#define __DIVA_DEBUG_IF_H__
-#define MSG_TYPE_DRV_ID		0x0001
-#define MSG_TYPE_FLAGS		0x0002
-#define MSG_TYPE_STRING		0x0003
-#define MSG_TYPE_BINARY		0x0004
-#define MSG_TYPE_MLOG     0x0005
-
-#define MSG_FRAME_MAX_SIZE 2150
-
-typedef struct _diva_dbg_entry_head {
-	dword sequence;
-	dword time_sec;
-	dword time_usec;
-	dword facility;
-	dword dli;
-	dword drv_id;
-	dword di_cpu;
-	dword data_length;
-} diva_dbg_entry_head_t;
-
-int diva_maint_init(byte *base, unsigned long length, int do_init);
-void *diva_maint_finit(void);
-dword diva_dbg_q_length(void);
-diva_dbg_entry_head_t *diva_maint_get_message(word *size,
-					      diva_os_spin_lock_magic_t *old_irql);
-void diva_maint_ack_message(int do_release,
-			    diva_os_spin_lock_magic_t *old_irql);
-void diva_maint_prtComp(char *format, ...);
-void diva_maint_wakeup_read(void);
-int diva_get_driver_info(dword id, byte *data, int data_length);
-int diva_get_driver_dbg_mask(dword id, byte *data);
-int diva_set_driver_dbg_mask(dword id, dword mask);
-void diva_mnt_remove_xdi_adapter(const DESCRIPTOR *d);
-void diva_mnt_add_xdi_adapter(const DESCRIPTOR *d);
-int diva_mnt_shutdown_xdi_adapters(void);
-
-#define DIVA_MAX_SELECTIVE_FILTER_LENGTH 127
-int diva_set_trace_filter(int filter_length, const char *filter);
-int diva_get_trace_filter(int max_length, char *filter);
-
-
-#define DITRACE_CMD_GET_DRIVER_INFO   1
-#define DITRACE_READ_DRIVER_DBG_MASK  2
-#define DITRACE_WRITE_DRIVER_DBG_MASK 3
-#define DITRACE_READ_TRACE_ENTRY      4
-#define DITRACE_READ_TRACE_ENTRYS     5
-#define DITRACE_WRITE_SELECTIVE_TRACE_FILTER 6
-#define DITRACE_READ_SELECTIVE_TRACE_FILTER  7
-
-/*
-  Trace levels for debugging via the management interface
-*/
-#define DIVA_MGT_DBG_TRACE          0x00000001 /* All trace messages from the card */
-#define DIVA_MGT_DBG_DCHAN          0x00000002 /* All D-channel related trace messages */
-#define DIVA_MGT_DBG_MDM_PROGRESS   0x00000004 /* Modem progress events */
-#define DIVA_MGT_DBG_FAX_PROGRESS   0x00000008 /* Fax progress events */
-#define DIVA_MGT_DBG_IFC_STATISTICS 0x00000010 /* Interface call statistics */
-#define DIVA_MGT_DBG_MDM_STATISTICS 0x00000020 /* Global modem statistics   */
-#define DIVA_MGT_DBG_FAX_STATISTICS 0x00000040 /* Global fax statistics     */
-#define DIVA_MGT_DBG_LINE_EVENTS    0x00000080 /* Line state events */
-#define DIVA_MGT_DBG_IFC_EVENTS     0x00000100 /* Interface/L1/L2 state events */
-#define DIVA_MGT_DBG_IFC_BCHANNEL   0x00000200 /* B-Channel trace for all channels */
-#define DIVA_MGT_DBG_IFC_AUDIO      0x00000400 /* Audio Tap trace for all channels */
-
-#endif /* __DIVA_DEBUG_IF_H__ */
diff --git a/drivers/isdn/hardware/eicon/debuglib.c b/drivers/isdn/hardware/eicon/debuglib.c
deleted file mode 100644
index d5b1092..0000000
--- a/drivers/isdn/hardware/eicon/debuglib.c
+++ /dev/null
@@ -1,156 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include "debuglib.h"
-
-#ifdef DIVA_NO_DEBUGLIB
-static DIVA_DI_PRINTF dprintf;
-#else /* DIVA_NO_DEBUGLIB */
-
-_DbgHandle_ myDriverDebugHandle = { 0 /*!Registered*/, DBG_HANDLE_VERSION };
-DIVA_DI_PRINTF dprintf = no_printf;
-/*****************************************************************************/
-#define DBG_FUNC(name)							\
-	void								\
-	myDbgPrint_##name(char *format, ...)				\
-	{ va_list ap;							\
-		if (myDriverDebugHandle.dbg_prt)			\
-		{ va_start(ap, format);				\
-			(myDriverDebugHandle.dbg_prt)			\
-				(myDriverDebugHandle.id, DLI_##name, format, ap); \
-			va_end(ap);					\
-		} }
-DBG_FUNC(LOG)
-DBG_FUNC(FTL)
-DBG_FUNC(ERR)
-DBG_FUNC(TRC)
-DBG_FUNC(MXLOG)
-DBG_FUNC(FTL_MXLOG)
-void
-myDbgPrint_EVL(long msgID, ...)
-{ va_list ap;
-	if (myDriverDebugHandle.dbg_ev)
-	{ va_start(ap, msgID);
-		(myDriverDebugHandle.dbg_ev)
-			(myDriverDebugHandle.id, (unsigned long)msgID, ap);
-		va_end(ap);
-	} }
-DBG_FUNC(REG)
-DBG_FUNC(MEM)
-DBG_FUNC(SPL)
-DBG_FUNC(IRP)
-DBG_FUNC(TIM)
-DBG_FUNC(BLK)
-DBG_FUNC(TAPI)
-DBG_FUNC(NDIS)
-DBG_FUNC(CONN)
-DBG_FUNC(STAT)
-DBG_FUNC(SEND)
-DBG_FUNC(RECV)
-DBG_FUNC(PRV0)
-DBG_FUNC(PRV1)
-DBG_FUNC(PRV2)
-DBG_FUNC(PRV3)
-/*****************************************************************************/
-int
-DbgRegister(char *drvName, char *drvTag, unsigned long dbgMask)
-{
-	int len;
-/*
- * deregister (if already registered) and zero out myDriverDebugHandle
- */
-	DbgDeregister();
-/*
- * initialize the debug handle
- */
-	myDriverDebugHandle.Version = DBG_HANDLE_VERSION;
-	myDriverDebugHandle.id  = -1;
-	myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
-	len = strlen(drvName);
-	memcpy(myDriverDebugHandle.drvName, drvName,
-	       (len < sizeof(myDriverDebugHandle.drvName)) ?
-	       len : sizeof(myDriverDebugHandle.drvName) - 1);
-	len = strlen(drvTag);
-	memcpy(myDriverDebugHandle.drvTag, drvTag,
-	       (len < sizeof(myDriverDebugHandle.drvTag)) ?
-	       len : sizeof(myDriverDebugHandle.drvTag) - 1);
-/*
- * Try to register debugging via old (and only) interface
- */
-	dprintf("\000\377", &myDriverDebugHandle);
-	if (myDriverDebugHandle.dbg_prt)
-	{
-		return (1);
-	}
-/*
- * Check if we registered with an old maint driver (see debuglib.h)
- */
-	if (myDriverDebugHandle.dbg_end != NULL
-	     /* location of 'dbg_prt' in _OldDbgHandle_ struct */
-	     && (myDriverDebugHandle.regTime.LowPart ||
-		 myDriverDebugHandle.regTime.HighPart))
-		/* same location as in _OldDbgHandle_ struct */
-	{
-		dprintf("%s: Cannot log to old maint driver !", drvName);
-		myDriverDebugHandle.dbg_end =
-			((_OldDbgHandle_ *)&myDriverDebugHandle)->dbg_end;
-		DbgDeregister();
-	}
-	return (0);
-}
-/*****************************************************************************/
-void
-DbgSetLevel(unsigned long dbgMask)
-{
-	myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
-}
-/*****************************************************************************/
-void
-DbgDeregister(void)
-{
-	if (myDriverDebugHandle.dbg_end)
-	{
-		(myDriverDebugHandle.dbg_end)(&myDriverDebugHandle);
-	}
-	memset(&myDriverDebugHandle, 0, sizeof(myDriverDebugHandle));
-}
-void xdi_dbg_xlog(char *x, ...) {
-	va_list ap;
-	va_start(ap, x);
-	if (myDriverDebugHandle.dbg_end &&
-	    (myDriverDebugHandle.dbg_irq || myDriverDebugHandle.dbg_old) &&
-	    (myDriverDebugHandle.dbgMask & DL_STAT)) {
-		if (myDriverDebugHandle.dbg_irq) {
-			(*(myDriverDebugHandle.dbg_irq))(myDriverDebugHandle.id,
-							 (x[0] != 0) ? DLI_TRC : DLI_XLOG, x, ap);
-		} else {
-			(*(myDriverDebugHandle.dbg_old))(myDriverDebugHandle.id, x, ap);
-		}
-	}
-	va_end(ap);
-}
-/*****************************************************************************/
-#endif /* DIVA_NO_DEBUGLIB */
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
deleted file mode 100644
index 6dcbf6a..0000000
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ /dev/null
@@ -1,322 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#if !defined(__DEBUGLIB_H__)
-#define __DEBUGLIB_H__
-#include <stdarg.h>
-/*
- * define global debug priorities
- */
-#define DL_LOG  0x00000001 /* always worth mentioning */
-#define DL_FTL  0x00000002 /* always sampled error    */
-#define DL_ERR  0x00000004 /* any kind of error       */
-#define DL_TRC  0x00000008 /* verbose information     */
-#define DL_XLOG  0x00000010 /* old xlog info           */
-#define DL_MXLOG 0x00000020 /* maestra xlog info    */
-#define DL_FTL_MXLOG 0x00000021 /* fatal maestra xlog info */
-#define DL_EVL  0x00000080 /* special NT eventlog msg */
-#define DL_COMPAT (DL_MXLOG | DL_XLOG)
-#define DL_PRIOR_MASK (DL_EVL | DL_COMPAT | DL_TRC | DL_ERR | DL_FTL | DL_LOG)
-#define DLI_LOG  0x0100
-#define DLI_FTL  0x0200
-#define DLI_ERR  0x0300
-#define DLI_TRC  0x0400
-#define DLI_XLOG 0x0500
-#define DLI_MXLOG 0x0600
-#define DLI_FTL_MXLOG 0x0600
-#define DLI_EVL  0x0800
-/*
- * define OS (operating system interface) debuglevel
- */
-#define DL_REG  0x00000100 /* init/query registry     */
-#define DL_MEM  0x00000200 /* memory management       */
-#define DL_SPL  0x00000400 /* event/spinlock handling */
-#define DL_IRP  0x00000800 /* I/O request handling    */
-#define DL_TIM  0x00001000 /* timer/watchdog handling */
-#define DL_BLK  0x00002000 /* raw data block contents */
-#define DL_OS_MASK (DL_BLK | DL_TIM | DL_IRP | DL_SPL | DL_MEM | DL_REG)
-#define DLI_REG  0x0900
-#define DLI_MEM  0x0A00
-#define DLI_SPL  0x0B00
-#define DLI_IRP  0x0C00
-#define DLI_TIM  0x0D00
-#define DLI_BLK  0x0E00
-/*
- * define ISDN (connection interface) debuglevel
- */
-#define DL_TAPI  0x00010000 /* debug TAPI interface    */
-#define DL_NDIS  0x00020000 /* debug NDIS interface    */
-#define DL_CONN  0x00040000 /* connection handling     */
-#define DL_STAT  0x00080000 /* trace state machines    */
-#define DL_SEND  0x00100000 /* trace raw xmitted data  */
-#define DL_RECV  0x00200000 /* trace raw received data */
-#define DL_DATA  (DL_SEND | DL_RECV)
-#define DL_ISDN_MASK (DL_DATA | DL_STAT | DL_CONN | DL_NDIS | DL_TAPI)
-#define DLI_TAPI 0x1100
-#define DLI_NDIS 0x1200
-#define DLI_CONN 0x1300
-#define DLI_STAT 0x1400
-#define DLI_SEND 0x1500
-#define DLI_RECV 0x1600
-/*
- * define some private (unspecified) debuglevel
- */
-#define DL_PRV0  0x01000000
-#define DL_PRV1  0x02000000
-#define DL_PRV2  0x04000000
-#define DL_PRV3  0x08000000
-#define DL_PRIV_MASK (DL_PRV0 | DL_PRV1 | DL_PRV2 | DL_PRV3)
-#define DLI_PRV0 0x1900
-#define DLI_PRV1 0x1A00
-#define DLI_PRV2 0x1B00
-#define DLI_PRV3 0x1C00
-#define DT_INDEX(x)  ((x) & 0x000F)
-#define DL_INDEX(x)  ((((x) >> 8) & 0x00FF) - 1)
-#define DLI_NAME(x)  ((x) & 0xFF00)
-/*
- * Debug mask for kernel mode tracing; if set, the output is also sent to
- * the system debug function. Requires that the project is compiled
- * with _KERNEL_DBG_PRINT_
- */
-#define DL_TO_KERNEL    0x40000000
-
-#ifdef DIVA_NO_DEBUGLIB
-#define myDbgPrint_LOG(x...) do { } while (0);
-#define myDbgPrint_FTL(x...) do { } while (0);
-#define myDbgPrint_ERR(x...) do { } while (0);
-#define myDbgPrint_TRC(x...) do { } while (0);
-#define myDbgPrint_MXLOG(x...) do { } while (0);
-#define myDbgPrint_EVL(x...) do { } while (0);
-#define myDbgPrint_REG(x...) do { } while (0);
-#define myDbgPrint_MEM(x...) do { } while (0);
-#define myDbgPrint_SPL(x...) do { } while (0);
-#define myDbgPrint_IRP(x...) do { } while (0);
-#define myDbgPrint_TIM(x...) do { } while (0);
-#define myDbgPrint_BLK(x...) do { } while (0);
-#define myDbgPrint_TAPI(x...) do { } while (0);
-#define myDbgPrint_NDIS(x...) do { } while (0);
-#define myDbgPrint_CONN(x...) do { } while (0);
-#define myDbgPrint_STAT(x...) do { } while (0);
-#define myDbgPrint_SEND(x...) do { } while (0);
-#define myDbgPrint_RECV(x...) do { } while (0);
-#define myDbgPrint_PRV0(x...) do { } while (0);
-#define myDbgPrint_PRV1(x...) do { } while (0);
-#define myDbgPrint_PRV2(x...) do { } while (0);
-#define myDbgPrint_PRV3(x...) do { } while (0);
-#define DBG_TEST(func, args) do { } while (0);
-#define DBG_EVL_ID(args) do { } while (0);
-
-#else /* DIVA_NO_DEBUGLIB */
-/*
- * define low level macros for formatted & raw debugging
- */
-#define DBG_DECL(func) extern void  myDbgPrint_##func(char *, ...);
-DBG_DECL(LOG)
-DBG_DECL(FTL)
-DBG_DECL(ERR)
-DBG_DECL(TRC)
-DBG_DECL(MXLOG)
-DBG_DECL(FTL_MXLOG)
-extern void  myDbgPrint_EVL(long, ...);
-DBG_DECL(REG)
-DBG_DECL(MEM)
-DBG_DECL(SPL)
-DBG_DECL(IRP)
-DBG_DECL(TIM)
-DBG_DECL(BLK)
-DBG_DECL(TAPI)
-DBG_DECL(NDIS)
-DBG_DECL(CONN)
-DBG_DECL(STAT)
-DBG_DECL(SEND)
-DBG_DECL(RECV)
-DBG_DECL(PRV0)
-DBG_DECL(PRV1)
-DBG_DECL(PRV2)
-DBG_DECL(PRV3)
-#ifdef _KERNEL_DBG_PRINT_
-/*
- * tracing to maint and kernel if selected in the trace mask.
- */
-#define DBG_TEST(func, args)						\
-	{ if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_##func) \
-		{							\
-			if ((myDriverDebugHandle.dbgMask) & DL_TO_KERNEL) \
-			{ DbgPrint args; DbgPrint("\r\n"); }		\
-			myDbgPrint_##func args;			\
-		} }
-#else
-/*
- * Standard tracing to maint driver.
- */
-#define DBG_TEST(func, args)						\
-	{ if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_##func) \
-		{ myDbgPrint_##func args;				\
-		} }
-#endif
-/*
- * For event level debug use a separate define; the parameters are
- * different and cause compiler errors on some systems.
- */
-#define DBG_EVL_ID(args)						\
-	{ if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_EVL)	\
-		{ myDbgPrint_EVL args;					\
-		} }
-
-#endif /* DIVA_NO_DEBUGLIB */
-
-#define DBG_LOG(args)  DBG_TEST(LOG, args)
-#define DBG_FTL(args)  DBG_TEST(FTL, args)
-#define DBG_ERR(args)  DBG_TEST(ERR, args)
-#define DBG_TRC(args)  DBG_TEST(TRC, args)
-#define DBG_MXLOG(args)  DBG_TEST(MXLOG, args)
-#define DBG_FTL_MXLOG(args) DBG_TEST(FTL_MXLOG, args)
-#define DBG_EVL(args)  DBG_EVL_ID(args)
-#define DBG_REG(args)  DBG_TEST(REG, args)
-#define DBG_MEM(args)  DBG_TEST(MEM, args)
-#define DBG_SPL(args)  DBG_TEST(SPL, args)
-#define DBG_IRP(args)  DBG_TEST(IRP, args)
-#define DBG_TIM(args)  DBG_TEST(TIM, args)
-#define DBG_BLK(args)  DBG_TEST(BLK, args)
-#define DBG_TAPI(args)  DBG_TEST(TAPI, args)
-#define DBG_NDIS(args)  DBG_TEST(NDIS, args)
-#define DBG_CONN(args)  DBG_TEST(CONN, args)
-#define DBG_STAT(args)  DBG_TEST(STAT, args)
-#define DBG_SEND(args)  DBG_TEST(SEND, args)
-#define DBG_RECV(args)  DBG_TEST(RECV, args)
-#define DBG_PRV0(args)  DBG_TEST(PRV0, args)
-#define DBG_PRV1(args)  DBG_TEST(PRV1, args)
-#define DBG_PRV2(args)  DBG_TEST(PRV2, args)
-#define DBG_PRV3(args)  DBG_TEST(PRV3, args)
-/*
- * prototypes for debug register/deregister functions in "debuglib.c"
- */
-#ifdef DIVA_NO_DEBUGLIB
-#define DbgRegister(name, tag, mask) do { } while (0)
-#define DbgDeregister() do { } while (0)
-#define DbgSetLevel(mask) do { } while (0)
-#else
-extern DIVA_DI_PRINTF dprintf;
-extern int  DbgRegister(char *drvName, char *drvTag, unsigned long dbgMask);
-extern void DbgDeregister(void);
-extern void DbgSetLevel(unsigned long dbgMask);
-#endif
-/*
- * driver internal structure for debug handling;
- * in client drivers this structure is maintained in "debuglib.c",
- * in the debug driver "debug.c" maintains a chain of such structs.
- */
-typedef struct _DbgHandle_ *pDbgHandle;
-typedef void (*DbgEnd)(pDbgHandle);
-typedef void (*DbgLog)(unsigned short, int, char *, va_list);
-typedef void (*DbgOld)(unsigned short, char *, va_list);
-typedef void (*DbgEv)(unsigned short, unsigned long, va_list);
-typedef void (*DbgIrq)(unsigned short, int, char *, va_list);
-typedef struct _DbgHandle_
-{ char    Registered; /* driver successfully registered */
-#define DBG_HANDLE_REG_NEW 0x01  /* this (new) structure    */
-#define DBG_HANDLE_REG_OLD 0x7f  /* old structure (see below)  */
-	char    Version;  /* version of this structure  */
-#define DBG_HANDLE_VERSION 1   /* contains dbg_old function now */
-#define DBG_HANDLE_VER_EXT  2           /* pReserved points to extended info*/
-	short               id;   /* internal id of registered driver */
-	struct _DbgHandle_ *next;   /* ptr to next registered driver    */
-	struct /*LARGE_INTEGER*/ {
-		unsigned long LowPart;
-		long          HighPart;
-	}     regTime;  /* timestamp for registration       */
-	void               *pIrp;   /* ptr to pending i/o request       */
-	unsigned long       dbgMask;  /* current debug mask               */
-	char                drvName[128]; /* ASCII name of registered driver  */
-	char                drvTag[64]; /* revision string     */
-	DbgEnd              dbg_end;  /* function for debug closing       */
-	DbgLog              dbg_prt;  /* function for debug appending     */
-	DbgOld              dbg_old;  /* function for old debug appending */
-	DbgEv       dbg_ev;  /* function for Windows NT Eventlog */
-	DbgIrq    dbg_irq;  /* function for irql checked debug  */
-	void      *pReserved3;
-} _DbgHandle_;
-extern _DbgHandle_ myDriverDebugHandle;
-typedef struct _OldDbgHandle_
-{ struct _OldDbgHandle_ *next;
-	void                *pIrp;
-	long    regTime[2];
-	unsigned long       dbgMask;
-	short               id;
-	char                drvName[78];
-	DbgEnd              dbg_end;
-	DbgLog              dbg_prt;
-} _OldDbgHandle_;
-/* the differences in DbgHandles
-   offset  old:                  tmp:                  new:
-   0       long  next            char  Registered      char  Registered
-                                 char  filler          char  Version
-                                 short id              short id
-   4       long  pIrp            long  regTime.lo      long  next
-   8       long  regTime.lo      long  regTime.hi      long  regTime.lo
-   12      long  regTime.hi      long  next            long  regTime.hi
-   16      long  dbgMask         long  pIrp            long  pIrp
-   20      short id              long  dbgMask         long  dbgMask
-   22      char  drvName[78]     ..                    ..
-   24      ..                    char  drvName[16]     char  drvName[16]
-   40      ..                    char  drvTag[64]      char  drvTag[64]
-   100     void *dbg_end         ..                    ..
-   104     void *dbg_prt         void *dbg_end         void *dbg_end
-   108     ..                    void *dbg_prt         void *dbg_prt
-   112     ..                    ..                    void *dbg_old
-   116     ..                    ..                    void *dbg_ev
-   120     ..                    ..                    void *dbg_irq
-   124     ..                    ..                    void *pReserved3
-   ( new->id == 0 && *((short *)&new->dbgMask) == -1 ) identifies "old",
-   new->Registered and new->Version overlay old->next,
-   new->next overlays old->pIrp, new->regTime matches old->regTime and
-   thus these fields can be maintained in the new struct without trouble;
-   id, dbgMask, drvName, dbg_end and dbg_prt need special handling !
-*/
-#define DBG_EXT_TYPE_CARD_TRACE     0x00000001
-typedef struct
-{
-	unsigned long ExtendedType;
-	union
-	{
-		/* DBG_EXT_TYPE_CARD_TRACE */
-		struct
-		{
-			void (*MaskChangedNotify)(void *pContext);
-			unsigned long ModuleTxtMask;
-			unsigned long DebugLevel;
-			unsigned long B_ChannelMask;
-			unsigned long LogBufferSize;
-		} CardTrace;
-	} Data;
-} _DbgExtendedInfo_;
-#ifndef DIVA_NO_DEBUGLIB
-/* -------------------------------------------------------------
-   Function used for xlog-style debug
-   ------------------------------------------------------------- */
-#define XDI_USE_XLOG 1
-void xdi_dbg_xlog(char *x, ...);
-#endif /* DIVA_NO_DEBUGLIB */
-#endif /* __DEBUGLIB_H__ */
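/* Editorial sketch, not part of the patch: a minimal example of how a
 * client driver typically used the debuglib interface removed above,
 * mirroring the calls made by diddfunc.c later in this diff.  "MYDRV"
 * and "1.0" are placeholder name/tag strings; the mask values come
 * from the DL_* defines in debuglib.h.
 */
#include "platform.h"
#include "debuglib.h"

static void my_debug_attach(void)
{
	/* register with the maint driver; trace errors and state changes */
	DbgRegister("MYDRV", "1.0", DL_LOG | DL_FTL | DL_ERR | DL_STAT);
	DBG_LOG(("debug registered"))
}

static void my_debug_detach(void)
{
	DBG_TRC(("debug shutting down"))
	DbgDeregister();
}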
diff --git a/drivers/isdn/hardware/eicon/dfifo.h b/drivers/isdn/hardware/eicon/dfifo.h
deleted file mode 100644
index 6a1d333..0000000
--- a/drivers/isdn/hardware/eicon/dfifo.h
+++ /dev/null
@@ -1,54 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_IDI_DFIFO_INC__
-#define __DIVA_IDI_DFIFO_INC__
-#define DIVA_DFIFO_CACHE_SZ   64 /* Used to isolate the pipe from
-				    the rest of the world;
-				    should be divisible by 4
-				 */
-#define DIVA_DFIFO_RAW_SZ    (2512 * 8)
-#define DIVA_DFIFO_DATA_SZ   68
-#define DIVA_DFIFO_HDR_SZ    4
-#define DIVA_DFIFO_SEGMENT_SZ  (DIVA_DFIFO_DATA_SZ + DIVA_DFIFO_HDR_SZ)
-#define DIVA_DFIFO_SEGMENTS   ((DIVA_DFIFO_RAW_SZ) / (DIVA_DFIFO_SEGMENT_SZ) + 1)
-#define DIVA_DFIFO_MEM_SZ (						\
-		(DIVA_DFIFO_SEGMENT_SZ) * (DIVA_DFIFO_SEGMENTS) +	\
-		(DIVA_DFIFO_CACHE_SZ) * 2				\
-		)
-#define DIVA_DFIFO_STEP DIVA_DFIFO_SEGMENT_SZ
-/* -------------------------------------------------------------------------
-   Block header layout is:
-   byte[0] -> flags
-   byte[1] -> length of data in block
-   byte[2] -> reserved
-   byte[3] -> reserved
-   ------------------------------------------------------------------------- */
-#define DIVA_DFIFO_WRAP   0x80 /* This is the last block in fifo   */
-#define DIVA_DFIFO_READY  0x40 /* This block is ready for processing */
-#define DIVA_DFIFO_LAST   0x20 /* This block is last in message      */
-#define DIVA_DFIFO_AUTO   0x10 /* Don't look for 'ready', don't ack */
-int diva_dfifo_create(void *start, int length);
-#endif
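/* Editorial sketch, not part of the patch: walking the segmented FIFO
 * laid out by the defines removed above.  diva_dfifo_count_ready() is a
 * hypothetical helper; it relies only on the documented block header
 * (byte[0] = flags, byte[1] = data length) and the DIVA_DFIFO_STEP
 * segment stride.  The byte type is assumed to come from platform.h.
 */
#include "platform.h"
#include "dfifo.h"

static unsigned int diva_dfifo_count_ready(const byte *fifo)
{
	const byte *blk = fifo;
	unsigned int ready = 0;

	for (;;) {
		byte flags = blk[0];		/* byte[0] -> flags         */

		if (!(flags & DIVA_DFIFO_READY))
			break;			/* block not filled yet     */
		ready++;			/* data follows the 4 byte header */
		if (flags & DIVA_DFIFO_WRAP)
			break;			/* last block in the fifo   */
		blk += DIVA_DFIFO_STEP;		/* advance to next segment  */
	}
	return ready;
}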
diff --git a/drivers/isdn/hardware/eicon/di.c b/drivers/isdn/hardware/eicon/di.c
deleted file mode 100644
index cd3fba1..0000000
--- a/drivers/isdn/hardware/eicon/di.c
+++ /dev/null
@@ -1,835 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "di.h"
-#if !defined USE_EXTENDED_DEBUGS
-#include "dimaint.h"
-#else
-#define dprintf
-#endif
-#include "io.h"
-#include "dfifo.h"
-#define PR_RAM  ((struct pr_ram *)0)
-#define RAM ((struct dual *)0)
-/*------------------------------------------------------------------*/
-/* local function prototypes                                        */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a);
-byte pr_dpc(ADAPTER *a);
-static byte pr_ready(ADAPTER *a);
-static byte isdn_rc(ADAPTER *, byte, byte, byte, word, dword, dword);
-static byte isdn_ind(ADAPTER *, byte, byte, byte, PBUFFER *, byte, word);
-/* -----------------------------------------------------------------
-   Functions used for the extended XDI Debug
-   macros
-   global convergence counter (used by all adapters)
-   See the implementation of the functions for details
-   about the parameters.
-   If you change the debugging parameters, then you should update
-   the aididbg.doc in the IDI docs.
-   ----------------------------------------------------------------- */
-#if defined(XDI_USE_XLOG)
-#define XDI_A_NR(_x_) ((byte)(((ISDN_ADAPTER *)(_x_->io))->ANum))
-static void xdi_xlog(byte *msg, word code, int length);
-static byte xdi_xlog_sec = 0;
-#else
-#define XDI_A_NR(_x_) ((byte)0)
-#endif
-static void xdi_xlog_rc_event(byte Adapter,
-			      byte Id, byte Ch, byte Rc, byte cb, byte type);
-static void xdi_xlog_request(byte Adapter, byte Id,
-			     byte Ch, byte Req, byte type);
-static void xdi_xlog_ind(byte Adapter,
-			 byte Id,
-			 byte Ch,
-			 byte Ind,
-			 byte rnr_valid,
-			 byte rnr,
-			 byte type);
-/*------------------------------------------------------------------*/
-/* output function                                                  */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a)
-{
-	byte e_no;
-	ENTITY *this = NULL;
-	BUFFERS *X;
-	word length;
-	word i;
-	word clength;
-	REQ *ReqOut;
-	byte more;
-	byte ReadyCount;
-	byte ReqCount;
-	byte Id;
-	dtrc(dprintf("pr_out"));
-	/* while a request is pending ...                           */
-	e_no = look_req(a);
-	if (!e_no)
-	{
-		dtrc(dprintf("no_req"));
-		return;
-	}
-	ReadyCount = pr_ready(a);
-	if (!ReadyCount)
-	{
-		dtrc(dprintf("not_ready"));
-		return;
-	}
-	ReqCount = 0;
-	while (e_no && ReadyCount) {
-		next_req(a);
-		this = entity_ptr(a, e_no);
-#ifdef USE_EXTENDED_DEBUGS
-		if (!this)
-		{
-			DBG_FTL(("XDI: [%02x] !A%d ==> NULL entity ptr - try to ignore",
-				 xdi_xlog_sec++, (int)((ISDN_ADAPTER *)a->io)->ANum))
-				e_no = look_req(a);
-			ReadyCount--;
-			continue;
-		}
-		{
-			DBG_TRC((">A%d Id=0x%x Req=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, this->Id, this->Req))
-				}
-#else
-		dbug(dprintf("out:Req=%x,Id=%x,Ch=%x", this->Req, this->Id, this->ReqCh));
-#endif
-		/* get address of next available request buffer             */
-		ReqOut = (REQ *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextReq)];
-#if defined(DIVA_ISTREAM)
-		if (!(a->tx_stream[this->Id]   &&
-		      this->Req == N_DATA)) {
-#endif
-			/* now copy the data from the current data buffer into the  */
-			/* adapters request buffer                                  */
-			length = 0;
-			i = this->XCurrent;
-			X = PTR_X(a, this);
-			while (i < this->XNum && length < 270) {
-				clength = min((word)(270 - length), (word)(X[i].PLength-this->XOffset));
-				a->ram_out_buffer(a,
-						  &ReqOut->XBuffer.P[length],
-						  PTR_P(a, this, &X[i].P[this->XOffset]),
-						  clength);
-				length += clength;
-				this->XOffset += clength;
-				if (this->XOffset == X[i].PLength) {
-					this->XCurrent = (byte)++i;
-					this->XOffset = 0;
-				}
-			}
-#if defined(DIVA_ISTREAM)
-		} else { /* Use CMA extension in order to transfer data to the card */
-			i = this->XCurrent;
-			X = PTR_X(a, this);
-			while (i < this->XNum) {
-				diva_istream_write(a,
-						   this->Id,
-						   PTR_P(a, this, &X[i].P[0]),
-						   X[i].PLength,
-						   ((i + 1) == this->XNum),
-						   0, 0);
-				this->XCurrent = (byte)++i;
-			}
-			length = 0;
-		}
-#endif
-		a->ram_outw(a, &ReqOut->XBuffer.length, length);
-		a->ram_out(a, &ReqOut->ReqId, this->Id);
-		a->ram_out(a, &ReqOut->ReqCh, this->ReqCh);
-		/* if it's a specific request (no ASSIGN) ...                */
-		if (this->Id & 0x1f) {
-			/* if buffers are left in the list of data buffers do       */
-			/* do chaining (LL_MDATA, N_MDATA)                          */
-			this->More++;
-			if (i < this->XNum && this->MInd) {
-				xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->MInd,
-						 a->IdTypeTable[this->No]);
-				a->ram_out(a, &ReqOut->Req, this->MInd);
-				more = true;
-			}
-			else {
-				xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->Req,
-						 a->IdTypeTable[this->No]);
-				this->More |= XMOREF;
-				a->ram_out(a, &ReqOut->Req, this->Req);
-				more = false;
-				if (a->FlowControlIdTable[this->ReqCh] == this->Id)
-					a->FlowControlSkipTable[this->ReqCh] = true;
-				/*
-				  Note that remove request was sent to the card
-				*/
-				if (this->Req == REMOVE) {
-					a->misc_flags_table[e_no] |= DIVA_MISC_FLAGS_REMOVE_PENDING;
-				}
-			}
-			/* if we did chaining, this entity is put back into the     */
-			/* request queue                                            */
-			if (more) {
-				req_queue(a, this->No);
-			}
-		}
-		/* else it's a ASSIGN                                       */
-		else {
-			/* save the request code used for buffer chaining           */
-			this->MInd = 0;
-			if (this->Id == BLLC_ID) this->MInd = LL_MDATA;
-			if (this->Id == NL_ID ||
-			    this->Id == TASK_ID ||
-			    this->Id == MAN_ID
-				) this->MInd = N_MDATA;
-			/* send the ASSIGN                                          */
-			a->IdTypeTable[this->No] = this->Id;
-			xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->Req, this->Id);
-			this->More |= XMOREF;
-			a->ram_out(a, &ReqOut->Req, this->Req);
-			/* save the reference of the ASSIGN                         */
-			assign_queue(a, this->No, a->ram_inw(a, &ReqOut->Reference));
-		}
-		a->ram_outw(a, &PR_RAM->NextReq, a->ram_inw(a, &ReqOut->next));
-		ReadyCount--;
-		ReqCount++;
-		e_no = look_req(a);
-	}
-	/* send the filled request buffers to the ISDN adapter      */
-	a->ram_out(a, &PR_RAM->ReqInput,
-		   (byte)(a->ram_in(a, &PR_RAM->ReqInput) + ReqCount));
-	/* if it is a 'unreturncoded' UREMOVE request, remove the  */
-	/* Id from our table after sending the request             */
-	if (this && (this->Req == UREMOVE) && this->Id) {
-		Id = this->Id;
-		e_no = a->IdTable[Id];
-		free_entity(a, e_no);
-		for (i = 0; i < 256; i++)
-		{
-			if (a->FlowControlIdTable[i] == Id)
-				a->FlowControlIdTable[i] = 0;
-		}
-		a->IdTable[Id] = 0;
-		this->Id = 0;
-	}
-}
-static byte pr_ready(ADAPTER *a)
-{
-	byte ReadyCount;
-	ReadyCount = (byte)(a->ram_in(a, &PR_RAM->ReqOutput) -
-			    a->ram_in(a, &PR_RAM->ReqInput));
-	if (!ReadyCount) {
-		if (!a->ReadyInt) {
-			a->ram_inc(a, &PR_RAM->ReadyInt);
-			a->ReadyInt++;
-		}
-	}
-	return ReadyCount;
-}
-/*------------------------------------------------------------------*/
-/* isdn interrupt handler                                           */
-/*------------------------------------------------------------------*/
-byte pr_dpc(ADAPTER *a)
-{
-	byte Count;
-	RC *RcIn;
-	IND *IndIn;
-	byte c;
-	byte RNRId;
-	byte Rc;
-	byte Ind;
-	/* if return codes are available ...                        */
-	if ((Count = a->ram_in(a, &PR_RAM->RcOutput)) != 0) {
-		dtrc(dprintf("#Rc=%x", Count));
-		/* get the buffer address of the first return code          */
-		RcIn = (RC *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextRc)];
-		/* for all return codes do ...                              */
-		while (Count--) {
-			if ((Rc = a->ram_in(a, &RcIn->Rc)) != 0) {
-				dword tmp[2];
-				/*
-				  Get extended information, associated with return code
-				*/
-				a->ram_in_buffer(a,
-						 &RcIn->Reserved2[0],
-						 (byte *)&tmp[0],
-						 8);
-				/* call return code handler, if it is not our return code   */
-				/* the handler returns 2                                    */
-				/* for all return codes we process, we clear the Rc field   */
-				isdn_rc(a,
-					Rc,
-					a->ram_in(a, &RcIn->RcId),
-					a->ram_in(a, &RcIn->RcCh),
-					a->ram_inw(a, &RcIn->Reference),
-					tmp[0],  /* type of extended information */
-					tmp[1]); /* extended information        */
-				a->ram_out(a, &RcIn->Rc, 0);
-			}
-			/* get buffer address of next return code                   */
-			RcIn = (RC *)&PR_RAM->B[a->ram_inw(a, &RcIn->next)];
-		}
-		/* clear all return codes (no chaining!)                    */
-		a->ram_out(a, &PR_RAM->RcOutput, 0);
-		/* call output function                                     */
-		pr_out(a);
-	}
-	/* clear RNR flag                                           */
-	RNRId = 0;
-	/* if indications are available ...                         */
-	if ((Count = a->ram_in(a, &PR_RAM->IndOutput)) != 0) {
-		dtrc(dprintf("#Ind=%x", Count));
-		/* get the buffer address of the first indication           */
-		IndIn = (IND *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextInd)];
-		/* for all indications do ...                               */
-		while (Count--) {
-			/* if the application marks an indication as RNR, all       */
-			/* indications from the same Id delivered in this interrupt */
-			/* are marked RNR                                           */
-			if (RNRId && RNRId == a->ram_in(a, &IndIn->IndId)) {
-				a->ram_out(a, &IndIn->Ind, 0);
-				a->ram_out(a, &IndIn->RNR, true);
-			}
-			else {
-				Ind = a->ram_in(a, &IndIn->Ind);
-				if (Ind) {
-					RNRId = 0;
-					/* call indication handler, a return value of 2 means chain */
-					/* a return value of 1 means RNR                            */
-					/* for all indications we process, we clear the Ind field   */
-					c = isdn_ind(a,
-						     Ind,
-						     a->ram_in(a, &IndIn->IndId),
-						     a->ram_in(a, &IndIn->IndCh),
-						     &IndIn->RBuffer,
-						     a->ram_in(a, &IndIn->MInd),
-						     a->ram_inw(a, &IndIn->MLength));
-					if (c == 1) {
-						dtrc(dprintf("RNR"));
-						a->ram_out(a, &IndIn->Ind, 0);
-						RNRId = a->ram_in(a, &IndIn->IndId);
-						a->ram_out(a, &IndIn->RNR, true);
-					}
-				}
-			}
-			/* get buffer address of next indication                    */
-			IndIn = (IND *)&PR_RAM->B[a->ram_inw(a, &IndIn->next)];
-		}
-		a->ram_out(a, &PR_RAM->IndOutput, 0);
-	}
-	return false;
-}
-byte scom_test_int(ADAPTER *a)
-{
-	return a->ram_in(a, (void *)0x3fe);
-}
-void scom_clear_int(ADAPTER *a)
-{
-	a->ram_out(a, (void *)0x3fe, 0);
-}
-/*------------------------------------------------------------------*/
-/* return code handler                                              */
-/*------------------------------------------------------------------*/
-static byte isdn_rc(ADAPTER *a,
-		    byte Rc,
-		    byte Id,
-		    byte Ch,
-		    word Ref,
-		    dword extended_info_type,
-		    dword extended_info)
-{
-	ENTITY *this;
-	byte e_no;
-	word i;
-	int cancel_rc;
-#ifdef USE_EXTENDED_DEBUGS
-	{
-		DBG_TRC(("<A%d Id=0x%x Rc=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, Id, Rc))
-			}
-#else
-	dbug(dprintf("isdn_rc(Rc=%x,Id=%x,Ch=%x)", Rc, Id, Ch));
-#endif
-	/* check for ready interrupt                                */
-	if (Rc == READY_INT) {
-		xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 0, 0);
-		if (a->ReadyInt) {
-			a->ReadyInt--;
-			return 0;
-		}
-		return 2;
-	}
-	/* if we know this Id ...                                   */
-	e_no = a->IdTable[Id];
-	if (e_no) {
-		this = entity_ptr(a, e_no);
-		xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 0, a->IdTypeTable[this->No]);
-		this->RcCh = Ch;
-		/* if it is a return code to a REMOVE request, remove the   */
-		/* Id from our table                                        */
-		if ((a->misc_flags_table[e_no] & DIVA_MISC_FLAGS_REMOVE_PENDING) &&
-		    (Rc == OK)) {
-			if (a->IdTypeTable[e_no] == NL_ID) {
-				if (a->RcExtensionSupported &&
-				    (extended_info_type != DIVA_RC_TYPE_REMOVE_COMPLETE)) {
-					dtrc(dprintf("XDI: N-REMOVE, A(%02x) Id:%02x, ignore RC=OK",
-						     XDI_A_NR(a), Id));
-					return (0);
-				}
-				if (extended_info_type == DIVA_RC_TYPE_REMOVE_COMPLETE)
-					a->RcExtensionSupported = true;
-			}
-			a->misc_flags_table[e_no] &= ~DIVA_MISC_FLAGS_REMOVE_PENDING;
-			a->misc_flags_table[e_no] &= ~DIVA_MISC_FLAGS_NO_RC_CANCELLING;
-			free_entity(a, e_no);
-			for (i = 0; i < 256; i++)
-			{
-				if (a->FlowControlIdTable[i] == Id)
-					a->FlowControlIdTable[i] = 0;
-			}
-			a->IdTable[Id] = 0;
-			this->Id = 0;
-			/* ---------------------------------------------------------------
-			   If we send N_DISC or N_DISC_ACK after we have received OK_FC,
-			   then the card will respond with OK_FC and later with RC==OK.
-			   If we send N_REMOVE in this state we will receive only RC==OK.
-			   This will create the state in which the XDI is waiting for the
-			   additional RC and does not deliver the RC to the client. This
-			   code corrects the counter of outstanding RC's in this case.
-			   --------------------------------------------------------------- */
-			if ((this->More & XMOREC) > 1) {
-				this->More &= ~XMOREC;
-				this->More |= 1;
-				dtrc(dprintf("XDI: correct MORE on REMOVE A(%02x) Id:%02x",
-					     XDI_A_NR(a), Id));
-			}
-		}
-		if (Rc == OK_FC) {
-			a->FlowControlIdTable[Ch] = Id;
-			a->FlowControlSkipTable[Ch] = false;
-			this->Rc = Rc;
-			this->More &= ~(XBUSY | XMOREC);
-			this->complete = 0xff;
-			xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
-			CALLBACK(a, this);
-			return 0;
-		}
-		/*
-		  New protocol code sends return codes that come from the release
-		  of a flow control condition, marked with the DIVA_RC_TYPE_OK_FC extended
-		  information element type.
-		  If such a return code arrives, the application is able to process
-		  all return codes itself and XDI should not cancel return codes.
-		  This return code does not decrement the XMOREC partial return code
-		  counter because there was no request for this return code,
-		  so XMOREC was not incremented either.
-		*/
-		if (extended_info_type == DIVA_RC_TYPE_OK_FC) {
-			a->misc_flags_table[e_no] |= DIVA_MISC_FLAGS_NO_RC_CANCELLING;
-			this->Rc = Rc;
-			this->complete = 0xff;
-			xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
-			DBG_TRC(("XDI OK_FC A(%02x) Id:%02x Ch:%02x Rc:%02x",
-				 XDI_A_NR(a), Id, Ch, Rc))
-				CALLBACK(a, this);
-			return 0;
-		}
-		cancel_rc = !(a->misc_flags_table[e_no] & DIVA_MISC_FLAGS_NO_RC_CANCELLING);
-		if (cancel_rc && (a->FlowControlIdTable[Ch] == Id))
-		{
-			a->FlowControlIdTable[Ch] = 0;
-			if ((Rc != OK) || !a->FlowControlSkipTable[Ch])
-			{
-				this->Rc = Rc;
-				if (Ch == this->ReqCh)
-				{
-					this->More &= ~(XBUSY | XMOREC);
-					this->complete = 0xff;
-				}
-				xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
-				CALLBACK(a, this);
-			}
-			return 0;
-		}
-		if (this->More & XMOREC)
-			this->More--;
-		/* call the application callback function                   */
-		if (((!cancel_rc) || (this->More & XMOREF)) && !(this->More & XMOREC)) {
-			this->Rc = Rc;
-			this->More &= ~XBUSY;
-			this->complete = 0xff;
-			xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
-			CALLBACK(a, this);
-		}
-		return 0;
-	}
-	/* if it's an ASSIGN return code check if it's a return     */
-	/* code to an ASSIGN request from us                        */
-	if ((Rc & 0xf0) == ASSIGN_RC) {
-		e_no = get_assign(a, Ref);
-		if (e_no) {
-			this = entity_ptr(a, e_no);
-			this->Id = Id;
-			xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 2, a->IdTypeTable[this->No]);
-			/* call the application callback function                   */
-			this->Rc = Rc;
-			this->More &= ~XBUSY;
-			this->complete = 0xff;
-#if defined(DIVA_ISTREAM) /* { */
-			if ((Rc == ASSIGN_OK) && a->ram_offset &&
-			    (a->IdTypeTable[this->No] == NL_ID) &&
-			    ((extended_info_type == DIVA_RC_TYPE_RX_DMA) ||
-			     (extended_info_type == DIVA_RC_TYPE_CMA_PTR)) &&
-			    extended_info) {
-				dword offset = (*(a->ram_offset)) (a);
-				dword tmp[2];
-				extended_info -= offset;
-#ifdef PLATFORM_GT_32BIT
-				a->ram_in_dw(a, (void *)ULongToPtr(extended_info), (dword *)&tmp[0], 2);
-#else
-				a->ram_in_dw(a, (void *)extended_info, (dword *)&tmp[0], 2);
-#endif
-				a->tx_stream[Id]  = tmp[0];
-				a->rx_stream[Id]  = tmp[1];
-				if (extended_info_type == DIVA_RC_TYPE_RX_DMA) {
-					DBG_TRC(("Id=0x%x RxDMA=%08x:%08x",
-						 Id, a->tx_stream[Id], a->rx_stream[Id]))
-						a->misc_flags_table[this->No] |= DIVA_MISC_FLAGS_RX_DMA;
-				} else {
-					DBG_TRC(("Id=0x%x CMA=%08x:%08x",
-						 Id, a->tx_stream[Id], a->rx_stream[Id]))
-						a->misc_flags_table[this->No] &= ~DIVA_MISC_FLAGS_RX_DMA;
-					a->rx_pos[Id]     = 0;
-					a->rx_stream[Id] -= offset;
-				}
-				a->tx_pos[Id]     = 0;
-				a->tx_stream[Id] -= offset;
-			} else {
-				a->tx_stream[Id] = 0;
-				a->rx_stream[Id] = 0;
-				a->misc_flags_table[this->No] &= ~DIVA_MISC_FLAGS_RX_DMA;
-			}
-#endif /* } */
-			CALLBACK(a, this);
-			if (Rc == ASSIGN_OK) {
-				a->IdTable[Id] = e_no;
-			}
-			else
-			{
-				free_entity(a, e_no);
-				for (i = 0; i < 256; i++)
-				{
-					if (a->FlowControlIdTable[i] == Id)
-						a->FlowControlIdTable[i] = 0;
-				}
-				a->IdTable[Id] = 0;
-				this->Id = 0;
-			}
-			return 1;
-		}
-	}
-	return 2;
-}
-/*------------------------------------------------------------------*/
-/* indication handler                                               */
-/*------------------------------------------------------------------*/
-static byte isdn_ind(ADAPTER *a,
-		     byte Ind,
-		     byte Id,
-		     byte Ch,
-		     PBUFFER *RBuffer,
-		     byte MInd,
-		     word MLength)
-{
-	ENTITY *this;
-	word clength;
-	word offset;
-	BUFFERS *R;
-	byte *cma = NULL;
-#ifdef USE_EXTENDED_DEBUGS
-	{
-		DBG_TRC(("<A%d Id=0x%x Ind=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, Id, Ind))
-			}
-#else
-	dbug(dprintf("isdn_ind(Ind=%x,Id=%x,Ch=%x)", Ind, Id, Ch));
-#endif
-	if (a->IdTable[Id]) {
-		this = entity_ptr(a, a->IdTable[Id]);
-		this->IndCh = Ch;
-		xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
-			     0/* rnr_valid */, 0 /* rnr */, a->IdTypeTable[this->No]);
-		/* if the Receive More flag is not yet set, this is the     */
-		/* first buffer of the packet                               */
-		if (this->RCurrent == 0xff) {
-			/* check for receive buffer chaining                        */
-			if (Ind == this->MInd) {
-				this->complete = 0;
-				this->Ind = MInd;
-			}
-			else {
-				this->complete = 1;
-				this->Ind = Ind;
-			}
-			/* call the application callback function for the receive   */
-			/* look ahead                                               */
-			this->RLength = MLength;
-#if defined(DIVA_ISTREAM)
-			if ((a->rx_stream[this->Id] ||
-			     (a->misc_flags_table[this->No] & DIVA_MISC_FLAGS_RX_DMA)) &&
-			    ((Ind == N_DATA) ||
-			     (a->protocol_capabilities & PROTCAP_CMA_ALLPR))) {
-				PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
-				if (a->misc_flags_table[this->No] & DIVA_MISC_FLAGS_RX_DMA) {
-#if defined(DIVA_IDI_RX_DMA)
-					dword d;
-					diva_get_dma_map_entry(\
-						(struct _diva_dma_map_entry *)IoAdapter->dma_map,
-						(int)a->rx_stream[this->Id], (void **)&cma, &d);
-#else
-					cma = &a->stream_buffer[0];
-					cma[0] = cma[1] = cma[2] = cma[3] = 0;
-#endif
-					this->RLength = MLength = (word)*(dword *)cma;
-					cma += 4;
-				} else {
-					int final = 0;
-					cma = &a->stream_buffer[0];
-					this->RLength = MLength = (word)diva_istream_read(a,
-											  Id,
-											  cma,
-											  sizeof(a->stream_buffer),
-											  &final, NULL, NULL);
-				}
-				IoAdapter->RBuffer.length = min(MLength, (word)270);
-				if (IoAdapter->RBuffer.length != MLength) {
-					this->complete = 0;
-				} else {
-					this->complete = 1;
-				}
-				memcpy(IoAdapter->RBuffer.P, cma, IoAdapter->RBuffer.length);
-				this->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
-			}
-#endif
-			if (!cma) {
-				a->ram_look_ahead(a, RBuffer, this);
-			}
-			this->RNum = 0;
-			CALLBACK(a, this);
-			/* map entity ptr, selector could be re-mapped by call to   */
-			/* IDI from within callback                                 */
-			this = entity_ptr(a, a->IdTable[Id]);
-			xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
-				     1/* rnr_valid */, this->RNR/* rnr */, a->IdTypeTable[this->No]);
-			/* check for RNR                                            */
-			if (this->RNR == 1) {
-				this->RNR = 0;
-				return 1;
-			}
-			/* if no buffers are provided by the application, the       */
-			/* application wants to copy the data itself including      */
-			/* N_MDATA/LL_MDATA chaining                                */
-			if (!this->RNR && !this->RNum) {
-				xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
-					     2/* rnr_valid */, 0/* rnr */, a->IdTypeTable[this->No]);
-				return 0;
-			}
-			/* if there is no RNR, set the More flag                    */
-			this->RCurrent = 0;
-			this->ROffset = 0;
-		}
-		if (this->RNR == 2) {
-			if (Ind != this->MInd) {
-				this->RCurrent = 0xff;
-				this->RNR = 0;
-			}
-			return 0;
-		}
-		/* if we have received buffers from the application, copy   */
-		/* the data into these buffers                              */
-		offset = 0;
-		R = PTR_R(a, this);
-		do {
-			if (this->ROffset == R[this->RCurrent].PLength) {
-				this->ROffset = 0;
-				this->RCurrent++;
-			}
-			if (cma) {
-				clength = min(MLength, (word)(R[this->RCurrent].PLength-this->ROffset));
-			} else {
-				clength = min(a->ram_inw(a, &RBuffer->length)-offset,
-					      R[this->RCurrent].PLength-this->ROffset);
-			}
-			if (R[this->RCurrent].P) {
-				if (cma) {
-					memcpy(PTR_P(a, this, &R[this->RCurrent].P[this->ROffset]),
-					       &cma[offset],
-					       clength);
-				} else {
-					a->ram_in_buffer(a,
-							 &RBuffer->P[offset],
-							 PTR_P(a, this, &R[this->RCurrent].P[this->ROffset]),
-							 clength);
-				}
-			}
-			offset += clength;
-			this->ROffset += clength;
-			if (cma) {
-				if (offset >= MLength) {
-					break;
-				}
-				continue;
-			}
-		} while (offset < (a->ram_inw(a, &RBuffer->length)));
-		/* if it's the last buffer of the packet, call the          */
-		/* application callback function for the receive complete   */
-		/* call                                                     */
-		if (Ind != this->MInd) {
-			R[this->RCurrent].PLength = this->ROffset;
-			if (this->ROffset) this->RCurrent++;
-			this->RNum = this->RCurrent;
-			this->RCurrent = 0xff;
-			this->Ind = Ind;
-			this->complete = 2;
-			xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
-				     3/* rnr_valid */, 0/* rnr */, a->IdTypeTable[this->No]);
-			CALLBACK(a, this);
-		}
-		return 0;
-	}
-	return 2;
-}
-#if defined(XDI_USE_XLOG)
-/* -----------------------------------------------------------
-   This function works in the same way as xlog on the
-   active board
-   ----------------------------------------------------------- */
-static void xdi_xlog(byte *msg, word code, int length) {
-	xdi_dbg_xlog("\x00\x02", msg, code, length);
-}
-#endif
-/* -----------------------------------------------------------
-   This function writes the information about the Return Code
-   processing in the trace buffer. Trace ID is 221.
-   INPUT:
-   Adapter - system unique adapter number (0 ... 255)
-   Id      - Id of the entity that had sent this return code
-   Ch      - Channel of the entity that had sent this return code
-   Rc      - return code value
-   cb:       (0...2)
-   switch (cb) {
-   case 0: printf ("DELIVERY"); break;
-   case 1: printf ("CALLBACK"); break;
-   case 2: printf ("ASSIGN"); break;
-   }
-   DELIVERY - have entered isdn_rc with this RC
-   CALLBACK - about to make callback to the application
-   for this RC
-   ASSIGN   - about to make callback for RC that is the result
-   of an ASSIGN request. There is no DELIVERY message
-   before this message
-   type   - the Id that was sent by the ASSIGN of this entity.
-   This should be a global Id like NL_ID, DSIG_ID, MAN_ID.
-   An unknown Id will cause "?-" in front of the request.
-   In this case log.c is to be extended.
-   ----------------------------------------------------------- */
-static void xdi_xlog_rc_event(byte Adapter,
-			      byte Id, byte Ch, byte Rc, byte cb, byte type) {
-#if defined(XDI_USE_XLOG)
-	word LogInfo[4];
-	PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
-	PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
-	PUT_WORD(&LogInfo[2], ((word)Rc | (word)(type << 8)));
-	PUT_WORD(&LogInfo[3], cb);
-	xdi_xlog((byte *)&LogInfo[0], 221, sizeof(LogInfo));
-#endif
-}
-/* ------------------------------------------------------------------------
-   This function writes the information about the request processing
-   in the trace buffer. Trace ID is 220.
-   INPUT:
-   Adapter - system unique adapter number (0 ... 255)
-   Id      - Id of the entity that had sent this request
-   Ch      - Channel of the entity that had sent this request
-   Req     - Code of the request
-   type    - the Id that was sent by the ASSIGN of this entity.
-   This should be a global Id like NL_ID, DSIG_ID, MAN_ID.
-   An unknown Id will cause "?-" in front of the request.
-   In this case log.c is to be extended.
-   ------------------------------------------------------------------------ */
-static void xdi_xlog_request(byte Adapter, byte Id,
-			     byte Ch, byte Req, byte type) {
-#if defined(XDI_USE_XLOG)
-	word LogInfo[3];
-	PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
-	PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
-	PUT_WORD(&LogInfo[2], ((word)Req | (word)(type << 8)));
-	xdi_xlog((byte *)&LogInfo[0], 220, sizeof(LogInfo));
-#endif
-}
-/* ------------------------------------------------------------------------
-   This function writes the information about the indication processing
-   in the trace buffer. Trace ID is 222.
-   INPUT:
-   Adapter - system unique adapter number (0 ... 255)
-   Id      - Id of the entity that had sent this indication
-   Ch      - Channel of the entity that had sent this indication
-   Ind     - Code of the indication
-   rnr_valid: (0 .. 3) supported
-   switch (rnr_valid) {
-   case 0: printf ("DELIVERY"); break;
-   case 1: printf ("RNR=%d", rnr);
-   case 2: printf ("RNum=0");
-   case 3: printf ("COMPLETE");
-   }
-   DELIVERY - indication entered the isdn_ind function
-   RNR=...  - application had returned RNR=... after the
-   look ahead callback
-   RNum=0   - application had not returned any buffer to copy
-   this indication and will copy it itself
-   COMPLETE - XDI had copied the data to the buffers provided
-   by the application and is about to issue the
-   final callback
-   rnr:  Look case 1 of the rnr_valid
-   type: the Id that was sent by the ASSIGN of this entity. This should
-   be a global Id like NL_ID, DSIG_ID, MAN_ID. An unknown Id will
-   cause "?-" in front of the request. In this case
-   log.c is to be extended.
-   ------------------------------------------------------------------------ */
-static void xdi_xlog_ind(byte Adapter,
-			 byte Id,
-			 byte Ch,
-			 byte Ind,
-			 byte rnr_valid,
-			 byte rnr,
-			 byte type) {
-#if defined(XDI_USE_XLOG)
-	word LogInfo[4];
-	PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
-	PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
-	PUT_WORD(&LogInfo[2], ((word)Ind | (word)(type << 8)));
-	PUT_WORD(&LogInfo[3], ((word)rnr | (word)(rnr_valid << 8)));
-	xdi_xlog((byte *)&LogInfo[0], 222, sizeof(LogInfo));
-#endif
-}
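/* Editorial sketch, not part of the patch: unpacking the return-code
 * trace record (trace ID 221) built by xdi_xlog_rc_event() above.  The
 * struct and helper are hypothetical and assume the record is still
 * available as the word[4] array that was handed to xdi_xlog(); the
 * byte/word types are assumed to come from platform.h.
 */
struct xdi_rc_trace {
	byte adapter, seq;	/* LogInfo[0]: Adapter | (seq << 8)             */
	byte id, ch;		/* LogInfo[1]: Id | (Ch << 8)                   */
	byte rc, id_type;	/* LogInfo[2]: Rc | (type << 8)                 */
	byte cb;		/* LogInfo[3]: 0 DELIVERY, 1 CALLBACK, 2 ASSIGN */
};

static void xdi_decode_rc_trace(const word LogInfo[4], struct xdi_rc_trace *t)
{
	t->adapter = (byte)(LogInfo[0] & 0xff);
	t->seq     = (byte)(LogInfo[0] >> 8);
	t->id      = (byte)(LogInfo[1] & 0xff);
	t->ch      = (byte)(LogInfo[1] >> 8);
	t->rc      = (byte)(LogInfo[2] & 0xff);
	t->id_type = (byte)(LogInfo[2] >> 8);
	t->cb      = (byte)(LogInfo[3] & 0xff);
}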
diff --git a/drivers/isdn/hardware/eicon/di.h b/drivers/isdn/hardware/eicon/di.h
deleted file mode 100644
index ff26c65..0000000
--- a/drivers/isdn/hardware/eicon/di.h
+++ /dev/null
@@ -1,118 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/*
- *  some macros for detailed trace management
- */
-#include "di_dbg.h"
-/*****************************************************************************/
-#define XMOREC 0x1f
-#define XMOREF 0x20
-#define XBUSY  0x40
-#define RMORE  0x80
-#define DIVA_MISC_FLAGS_REMOVE_PENDING    0x01
-#define DIVA_MISC_FLAGS_NO_RC_CANCELLING  0x02
-#define DIVA_MISC_FLAGS_RX_DMA            0x04
-/* structure for all information we have to keep on a per   */
-/* adapter basis                                            */
-typedef struct adapter_s ADAPTER;
-struct adapter_s {
-	void *io;
-	byte IdTable[256];
-	byte IdTypeTable[256];
-	byte FlowControlIdTable[256];
-	byte FlowControlSkipTable[256];
-	byte ReadyInt;
-	byte RcExtensionSupported;
-	byte misc_flags_table[256];
-	dword protocol_capabilities;
-	byte (*ram_in)(ADAPTER *a, void *adr);
-	word (*ram_inw)(ADAPTER *a, void *adr);
-	void (*ram_in_buffer)(ADAPTER *a, void *adr, void *P, word length);
-	void (*ram_look_ahead)(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
-	void (*ram_out)(ADAPTER *a, void *adr, byte data);
-	void (*ram_outw)(ADAPTER *a, void *adr, word data);
-	void (*ram_out_buffer)(ADAPTER *a, void *adr, void *P, word length);
-	void (*ram_inc)(ADAPTER *a, void *adr);
-#if defined(DIVA_ISTREAM)
-	dword rx_stream[256];
-	dword tx_stream[256];
-	word tx_pos[256];
-	word rx_pos[256];
-	byte stream_buffer[2512];
-	dword (*ram_offset)(ADAPTER *a);
-	void (*ram_out_dw)(ADAPTER *a,
-			   void *addr,
-			   const dword *data,
-			   int dwords);
-	void (*ram_in_dw)(ADAPTER *a,
-			  void *addr,
-			  dword *data,
-			  int dwords);
-	void (*istream_wakeup)(ADAPTER *a);
-#else
-	byte stream_buffer[4];
-#endif
-};
-/*------------------------------------------------------------------*/
-/* public functions of IDI common code                              */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a);
-byte pr_dpc(ADAPTER *a);
-byte scom_test_int(ADAPTER *a);
-void scom_clear_int(ADAPTER *a);
-/*------------------------------------------------------------------*/
-/* OS specific functions used by IDI common code                    */
-/*------------------------------------------------------------------*/
-void free_entity(ADAPTER *a, byte e_no);
-void assign_queue(ADAPTER *a, byte e_no, word ref);
-byte get_assign(ADAPTER *a, word ref);
-void req_queue(ADAPTER *a, byte e_no);
-byte look_req(ADAPTER *a);
-void next_req(ADAPTER *a);
-ENTITY *entity_ptr(ADAPTER *a, byte e_no);
-#if defined(DIVA_ISTREAM)
-struct _diva_xdi_stream_interface;
-void diva_xdi_provide_istream_info(ADAPTER *a,
-				   struct _diva_xdi_stream_interface *pI);
-void pr_stream(ADAPTER *a);
-int diva_istream_write(void *context,
-		       int Id,
-		       void *data,
-		       int length,
-		       int final,
-		       byte usr1,
-		       byte usr2);
-int diva_istream_read(void *context,
-		      int Id,
-		      void *data,
-		      int max_length,
-		      int *final,
-		      byte *usr1,
-		      byte *usr2);
-#if defined(DIVA_IDI_RX_DMA)
-#include "diva_dma.h"
-#endif
-#endif
diff --git a/drivers/isdn/hardware/eicon/di_dbg.h b/drivers/isdn/hardware/eicon/di_dbg.h
deleted file mode 100644
index 1380b60..0000000
--- a/drivers/isdn/hardware/eicon/di_dbg.h
+++ /dev/null
@@ -1,37 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DI_DBG_INC__
-#define __DIVA_DI_DBG_INC__
-#if !defined(dtrc)
-#define dtrc(a)
-#endif
-#if !defined(dbug)
-#define dbug(a)
-#endif
-#if !defined USE_EXTENDED_DEBUGS
-extern void (*dprintf)(char*, ...);
-#endif
-#endif
diff --git a/drivers/isdn/hardware/eicon/di_defs.h b/drivers/isdn/hardware/eicon/di_defs.h
deleted file mode 100644
index a5094d2..0000000
--- a/drivers/isdn/hardware/eicon/di_defs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _DI_DEFS_
-#define _DI_DEFS_
-/* typedefs for our data structures                         */
-typedef struct get_name_s GET_NAME;
-/*  The entity_s structure is used to pass all
-    parameters between application and IDI   */
-typedef struct entity_s ENTITY;
-typedef struct buffers_s BUFFERS;
-typedef struct postcall_s POSTCALL;
-typedef struct get_para_s GET_PARA;
-#define BOARD_NAME_LENGTH 9
-#define IDI_CALL_LINK_T
-#define IDI_CALL_ENTITY_T
-/* typedef void ( * IDI_CALL)(ENTITY *); */
-/* --------------------------------------------------------
-   IDI_CALL
-   -------------------------------------------------------- */
-typedef void (IDI_CALL_LINK_T *IDI_CALL)(ENTITY IDI_CALL_ENTITY_T *);
-typedef struct {
-	word length;          /* length of data/parameter field           */
-	byte P[270];          /* data/parameter field                     */
-} DBUFFER;
-struct get_name_s {
-	word command;         /* command = 0x0100 */
-	byte name[BOARD_NAME_LENGTH];
-};
-struct postcall_s {
-	word      command;                           /* command = 0x0300 */
-	word      dummy;                             /* not used */
-	void      (*callback)(void *);      /* call back */
-	void      *context;                          /* context pointer */
-};
-#define REQ_PARA            0x0600   /* request command line parameters */
-#define REQ_PARA_LEN             1   /* number of data bytes */
-#define L1_STARTUP_DOWN_POS      0   /* '-y' command line parameter in......*/
-#define L1_STARTUP_DOWN_MSK   0x01   /* first byte position (index 0) with value 0x01 */
-struct get_para_s {
-	word  command;            /* command = 0x0600 */
-	byte  len;                /* max length of para field in bytes */
-	byte  para[REQ_PARA_LEN]; /* parameter field */
-};
-struct buffers_s {
-	word PLength;
-	byte *P;
-};
-struct entity_s {
-	byte                  Req;            /* pending request          */
-	byte                  Rc;             /* return code received     */
-	byte                  Ind;            /* indication received      */
-	byte                  ReqCh;          /* channel of current Req   */
-	byte                  RcCh;           /* channel of current Rc    */
-	byte                  IndCh;          /* channel of current Ind   */
-	byte                  Id;             /* ID used by this entity   */
-	byte                  GlobalId;       /* reserved field           */
-	byte                  XNum;           /* number of X-buffers      */
-	byte                  RNum;           /* number of R-buffers      */
-	BUFFERS               *X;             /* pointer to X-buffer list */
-	BUFFERS               *R;             /* pointer to R-buffer list */
-	word                  RLength;        /* length of current R-data */
-	DBUFFER               *RBuffer;       /* buffer of current R-data */
-	byte                  RNR;            /* receive not ready flag   */
-	byte                  complete;       /* receive complete status  */
-	IDI_CALL              callback;
-	word                  user[2];
-	/* fields used by the driver internally                     */
-	byte                  No;             /* entity number            */
-	byte                  reserved2;      /* reserved field           */
-	byte                  More;           /* R/X More flags           */
-	byte                  MInd;           /* MDATA coding for this ID */
-	byte                  XCurrent;       /* current transmit buffer  */
-	byte                  RCurrent;       /* current receive buffer   */
-	word                  XOffset;        /* offset in x-buffer       */
-	word                  ROffset;        /* offset in r-buffer       */
-};
-typedef struct {
-	byte                  type;
-	byte                  channels;
-	word                  features;
-	IDI_CALL              request;
-} DESCRIPTOR;
-/* descriptor type field coding */
-#define IDI_ADAPTER_S           1
-#define IDI_ADAPTER_PR          2
-#define IDI_ADAPTER_DIVA        3
-#define IDI_ADAPTER_MAESTRA     4
-#define IDI_VADAPTER            0x40
-#define IDI_DRIVER              0x80
-#define IDI_DADAPTER            0xfd
-#define IDI_DIDDPNP             0xfe
-#define IDI_DIMAINT             0xff
-/* Hardware IDs ISA PNP */
-#define HW_ID_DIVA_PRO     3    /* same as IDI_ADAPTER_DIVA    */
-#define HW_ID_MAESTRA      4    /* same as IDI_ADAPTER_MAESTRA */
-#define HW_ID_PICCOLA      5
-#define HW_ID_DIVA_PRO20   6
-#define HW_ID_DIVA20       7
-#define HW_ID_DIVA_PRO20_U 8
-#define HW_ID_DIVA20_U     9
-#define HW_ID_DIVA30       10
-#define HW_ID_DIVA30_U     11
-/* Hardware IDs PCI */
-#define HW_ID_EICON_PCI              0x1133
-#define HW_ID_SIEMENS_PCI            0x8001 /* unused SubVendor ID for Siemens Cornet-N cards */
-#define HW_ID_PROTTYPE_CORNETN       0x0014 /* SubDevice ID for Siemens Cornet-N cards */
-#define HW_ID_FUJITSU_SIEMENS_PCI    0x110A /* SubVendor ID for Fujitsu Siemens */
-#define HW_ID_GS03_PCI               0x0021 /* SubDevice ID for Fujitsu Siemens ISDN S0 card */
-#define HW_ID_DIVA_PRO20_PCI         0xe001
-#define HW_ID_DIVA20_PCI             0xe002
-#define HW_ID_DIVA_PRO20_PCI_U       0xe003
-#define HW_ID_DIVA20_PCI_U           0xe004
-#define HW_ID_DIVA201_PCI            0xe005
-#define HW_ID_DIVA_CT_ST             0xe006
-#define HW_ID_DIVA_CT_U              0xe007
-#define HW_ID_DIVA_CTL_ST            0xe008
-#define HW_ID_DIVA_CTL_U             0xe009
-#define HW_ID_DIVA_ISDN_V90_PCI      0xe00a
-#define HW_ID_DIVA202_PCI_ST         0xe00b
-#define HW_ID_DIVA202_PCI_U          0xe00c
-#define HW_ID_DIVA_PRO30_PCI         0xe00d
-#define HW_ID_MAESTRA_PCI            0xe010
-#define HW_ID_MAESTRAQ_PCI           0xe012
-#define HW_ID_DSRV_Q8M_V2_PCI        0xe013
-#define HW_ID_MAESTRAP_PCI           0xe014
-#define HW_ID_DSRV_P30M_V2_PCI       0xe015
-#define HW_ID_DSRV_VOICE_Q8M_PCI     0xe016
-#define HW_ID_DSRV_VOICE_Q8M_V2_PCI  0xe017
-#define HW_ID_DSRV_B2M_V2_PCI        0xe018
-#define HW_ID_DSRV_VOICE_P30M_V2_PCI 0xe019
-#define HW_ID_DSRV_B2F_PCI           0xe01a
-#define HW_ID_DSRV_VOICE_B2M_V2_PCI  0xe01b
-/* Hardware IDs USB */
-#define EICON_USB_VENDOR_ID          0x071D
-#define HW_ID_DIVA_USB_REV1          0x1000
-#define HW_ID_DIVA_USB_REV2          0x1003
-#define HW_ID_TELEDAT_SURF_USB_REV2  0x1004
-#define HW_ID_TELEDAT_SURF_USB_REV1  0x2000
-/* --------------------------------------------------------------------------
-   Adapter array change notification framework
-   -------------------------------------------------------------------------- */
-typedef void (IDI_CALL_LINK_T *didd_adapter_change_callback_t)(void IDI_CALL_ENTITY_T *context, DESCRIPTOR *adapter, int removal);
-/* -------------------------------------------------------------------------- */
-#define DI_VOICE          0x0 /* obsolete define */
-#define DI_FAX3           0x1
-#define DI_MODEM          0x2
-#define DI_POST           0x4
-#define DI_V110           0x8
-#define DI_V120           0x10
-#define DI_POTS           0x20
-#define DI_CODEC          0x40
-#define DI_MANAGE         0x80
-#define DI_V_42           0x0100
-#define DI_EXTD_FAX       0x0200 /* Extended FAX (ECM, 2D, T.6, Polling) */
-#define DI_AT_PARSER      0x0400 /* Build-in AT Parser in the L2 */
-#define DI_VOICE_OVER_IP  0x0800 /* Voice over IP support */
-typedef void (IDI_CALL_LINK_T *_IDI_CALL)(void *, ENTITY *);
-#endif
diff --git a/drivers/isdn/hardware/eicon/did_vers.h b/drivers/isdn/hardware/eicon/did_vers.h
deleted file mode 100644
index fa8db82..0000000
--- a/drivers/isdn/hardware/eicon/did_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_didd_common_code_build[] = "102-51";
diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
deleted file mode 100644
index b0b23ed..0000000
--- a/drivers/isdn/hardware/eicon/diddfunc.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/* $Id: diddfunc.c,v 1.14.6.2 2004/08/28 20:03:53 armin Exp $
- *
- * DIDD Interface module for Eicon active cards.
- *
- * Functions are in dadapter.c
- *
- * Copyright 2002-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2002-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "dadapter.h"
-#include "divasync.h"
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-
-extern void DIVA_DIDD_Read(void *, int);
-extern char *DRIVERRELEASE_DIDD;
-static dword notify_handle;
-static DESCRIPTOR _DAdapter;
-
-/*
- * didd callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
-			   int removal)
-{
-	if (adapter->type == IDI_DADAPTER) {
-		DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."))
-			return (NULL);
-	} else if (adapter->type == IDI_DIMAINT) {
-		if (removal) {
-			DbgDeregister();
-		} else {
-			DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
-		}
-	}
-	return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
-	int x = 0;
-	int dadapter = 0;
-	IDI_SYNC_REQ req;
-	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
-	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
-			dadapter = 1;
-			memcpy(&_DAdapter, &DIDD_Table[x], sizeof(_DAdapter));
-			req.didd_notify.e.Req = 0;
-			req.didd_notify.e.Rc =
-				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
-			req.didd_notify.info.callback = (void *)didd_callback;
-			req.didd_notify.info.context = NULL;
-			_DAdapter.request((ENTITY *)&req);
-			if (req.didd_notify.e.Rc != 0xff)
-				return (0);
-			notify_handle = req.didd_notify.info.handle;
-		} else if (DIDD_Table[x].type == IDI_DIMAINT) {	/* MAINT found */
-			DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
-		}
-	}
-	return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void __exit disconnect_didd(void)
-{
-	IDI_SYNC_REQ req;
-
-	req.didd_notify.e.Req = 0;
-	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
-	req.didd_notify.info.handle = notify_handle;
-	_DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init diddfunc_init(void)
-{
-	diva_didd_load_time_init();
-
-	if (!connect_didd()) {
-		DBG_ERR(("init: failed to connect to DIDD."))
-			diva_didd_load_time_finit();
-		return (0);
-	}
-	return (1);
-}
-
-/*
- * finit
- */
-void __exit diddfunc_finit(void)
-{
-	DbgDeregister();
-	disconnect_didd();
-	diva_didd_load_time_finit();
-}
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
deleted file mode 100644
index 1b25d8b..0000000
--- a/drivers/isdn/hardware/eicon/diva.c
+++ /dev/null
@@ -1,666 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: diva.c,v 1.21.4.1 2004/05/08 14:33:43 armin Exp $ */
-
-#define CARDTYPE_H_WANT_DATA            1
-#define CARDTYPE_H_WANT_IDI_DATA        0
-#define CARDTYPE_H_WANT_RESOURCE_DATA   0
-#define CARDTYPE_H_WANT_FILE_DATA       0
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "di_defs.h"
-#include "di.h"
-#include "io.h"
-#include "pc_maint.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "diva_pci.h"
-#include "diva.h"
-
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
-#include "os_pri.h"
-#endif
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
-#include "os_bri.h"
-#include "os_4bri.h"
-#endif
-
-PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-extern IDI_CALL Requests[MAX_ADAPTER];
-extern int create_adapter_proc(diva_os_xdi_adapter_t *a);
-extern void remove_adapter_proc(diva_os_xdi_adapter_t *a);
-
-#define DivaIdiReqFunc(N)						\
-	static void DivaIdiRequest##N(ENTITY *e)			\
-	{ if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
-
-/*
-**  Create own 32 Adapters
-*/
-DivaIdiReqFunc(0)
-DivaIdiReqFunc(1)
-DivaIdiReqFunc(2)
-DivaIdiReqFunc(3)
-DivaIdiReqFunc(4)
-DivaIdiReqFunc(5)
-DivaIdiReqFunc(6)
-DivaIdiReqFunc(7)
-DivaIdiReqFunc(8)
-DivaIdiReqFunc(9)
-DivaIdiReqFunc(10)
-DivaIdiReqFunc(11)
-DivaIdiReqFunc(12)
-DivaIdiReqFunc(13)
-DivaIdiReqFunc(14)
-DivaIdiReqFunc(15)
-DivaIdiReqFunc(16)
-DivaIdiReqFunc(17)
-DivaIdiReqFunc(18)
-DivaIdiReqFunc(19)
-DivaIdiReqFunc(20)
-DivaIdiReqFunc(21)
-DivaIdiReqFunc(22)
-DivaIdiReqFunc(23)
-DivaIdiReqFunc(24)
-DivaIdiReqFunc(25)
-DivaIdiReqFunc(26)
-DivaIdiReqFunc(27)
-DivaIdiReqFunc(28)
-DivaIdiReqFunc(29)
-DivaIdiReqFunc(30)
-DivaIdiReqFunc(31)
-
-/*
-**  LOCALS
-*/
-static LIST_HEAD(adapter_queue);
-
-typedef struct _diva_get_xlog {
-	word command;
-	byte req;
-	byte rc;
-	byte data[sizeof(struct mi_pc_maint)];
-} diva_get_xlog_t;
-
-typedef struct _diva_supported_cards_info {
-	int CardOrdinal;
-	diva_init_card_proc_t init_card;
-} diva_supported_cards_info_t;
-
-static diva_supported_cards_info_t divas_supported_cards[] = {
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
-	/*
-	  PRI Cards
-	*/
-	{CARDTYPE_DIVASRV_P_30M_PCI, diva_pri_init_card},
-	/*
-	  PRI Rev.2 Cards
-	*/
-	{CARDTYPE_DIVASRV_P_30M_V2_PCI, diva_pri_init_card},
-	/*
-	  PRI Rev.2 VoIP Cards
-	*/
-	{CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI, diva_pri_init_card},
-#endif
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
-	/*
-	  4BRI Rev 1 Cards
-	*/
-	{CARDTYPE_DIVASRV_Q_8M_PCI, diva_4bri_init_card},
-	{CARDTYPE_DIVASRV_VOICE_Q_8M_PCI, diva_4bri_init_card},
-	/*
-	  4BRI Rev 2 Cards
-	*/
-	{CARDTYPE_DIVASRV_Q_8M_V2_PCI, diva_4bri_init_card},
-	{CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI, diva_4bri_init_card},
-	/*
-	  4BRI Based BRI Rev 2 Cards
-	*/
-	{CARDTYPE_DIVASRV_B_2M_V2_PCI, diva_4bri_init_card},
-	{CARDTYPE_DIVASRV_B_2F_PCI, diva_4bri_init_card},
-	{CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI, diva_4bri_init_card},
-	/*
-	  BRI
-	*/
-	{CARDTYPE_MAESTRA_PCI, diva_bri_init_card},
-#endif
-
-	/*
-	  EOL
-	*/
-	{-1}
-};
-
-static void diva_init_request_array(void);
-static void *divas_create_pci_card(int handle, void *pci_dev_handle);
-
-static diva_os_spin_lock_t adapter_lock;
-
-static int diva_find_free_adapters(int base, int nr)
-{
-	int i;
-
-	for (i = 0; i < nr; i++) {
-		if (IoAdapters[base + i]) {
-			return (-1);
-		}
-	}
-
-	return (0);
-}
-
-static diva_os_xdi_adapter_t *diva_q_get_next(struct list_head *what)
-{
-	diva_os_xdi_adapter_t *a = NULL;
-
-	if (what && (what->next != &adapter_queue))
-		a = list_entry(what->next, diva_os_xdi_adapter_t, link);
-
-	return (a);
-}
-
-/* --------------------------------------------------------------------------
-   Add card to the card list
-   -------------------------------------------------------------------------- */
-void *diva_driver_add_card(void *pdev, unsigned long CardOrdinal)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	diva_os_xdi_adapter_t *pdiva, *pa;
-	int i, j, max, nr;
-
-	for (i = 0; divas_supported_cards[i].CardOrdinal != -1; i++) {
-		if (divas_supported_cards[i].CardOrdinal == CardOrdinal) {
-			if (!(pdiva = divas_create_pci_card(i, pdev))) {
-				return NULL;
-			}
-			switch (CardOrdinal) {
-			case CARDTYPE_DIVASRV_Q_8M_PCI:
-			case CARDTYPE_DIVASRV_VOICE_Q_8M_PCI:
-			case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
-			case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
-				max = MAX_ADAPTER - 4;
-				nr = 4;
-				break;
-
-			default:
-				max = MAX_ADAPTER;
-				nr = 1;
-			}
-
-			diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
-
-			for (i = 0; i < max; i++) {
-				if (!diva_find_free_adapters(i, nr)) {
-					pdiva->controller = i + 1;
-					pdiva->xdi_adapter.ANum = pdiva->controller;
-					IoAdapters[i] = &pdiva->xdi_adapter;
-					diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
-					create_adapter_proc(pdiva);	/* add adapter to proc file system */
-
-					DBG_LOG(("add %s:%d",
-						 CardProperties
-						 [CardOrdinal].Name,
-						 pdiva->controller))
-
-						diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
-					pa = pdiva;
-					for (j = 1; j < nr; j++) {	/* slave adapters, if any */
-						pa = diva_q_get_next(&pa->link);
-						if (pa && !pa->interface.cleanup_adapter_proc) {
-							pa->controller = i + 1 + j;
-							pa->xdi_adapter.ANum = pa->controller;
-							IoAdapters[i + j] = &pa->xdi_adapter;
-							diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
-							DBG_LOG(("add slave adapter (%d)",
-								 pa->controller))
-								create_adapter_proc(pa);	/* add adapter to proc file system */
-							diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
-						} else {
-							DBG_ERR(("slave adapter problem"))
-								break;
-						}
-					}
-
-					diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
-					return (pdiva);
-				}
-			}
-
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
-
-			/*
-			  Not able to add adapter - remove it and return error
-			*/
-			DBG_ERR(("can not alloc request array"))
-				diva_driver_remove_card(pdiva);
-
-			return NULL;
-		}
-	}
-
-	return NULL;
-}
-
-/* --------------------------------------------------------------------------
-   Called on driver load, MAIN, main, DriverEntry
-   -------------------------------------------------------------------------- */
-int divasa_xdi_driver_entry(void)
-{
-	diva_os_initialize_spin_lock(&adapter_lock, "adapter");
-	memset(&IoAdapters[0], 0x00, sizeof(IoAdapters));
-	diva_init_request_array();
-
-	return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Remove adapter from list
-   -------------------------------------------------------------------------- */
-static diva_os_xdi_adapter_t *get_and_remove_from_queue(void)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	diva_os_xdi_adapter_t *a = NULL;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "driver_unload");
-
-	if (!list_empty(&adapter_queue)) {
-		a = list_entry(adapter_queue.next, diva_os_xdi_adapter_t, link);
-		list_del(adapter_queue.next);
-	}
-
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");
-	return (a);
-}
-
-/* --------------------------------------------------------------------------
-   Remove card from the card list
-   -------------------------------------------------------------------------- */
-void diva_driver_remove_card(void *pdiva)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	diva_os_xdi_adapter_t *a[4];
-	diva_os_xdi_adapter_t *pa;
-	int i;
-
-	pa = a[0] = (diva_os_xdi_adapter_t *) pdiva;
-	a[1] = a[2] = a[3] = NULL;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "remode adapter");
-
-	for (i = 1; i < 4; i++) {
-		if ((pa = diva_q_get_next(&pa->link))
-		    && !pa->interface.cleanup_adapter_proc) {
-			a[i] = pa;
-		} else {
-			break;
-		}
-	}
-
-	for (i = 0; ((i < 4) && a[i]); i++) {
-		list_del(&a[i]->link);
-	}
-
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");
-
-	(*(a[0]->interface.cleanup_adapter_proc)) (a[0]);
-
-	for (i = 0; i < 4; i++) {
-		if (a[i]) {
-			if (a[i]->controller) {
-				DBG_LOG(("remove adapter (%d)",
-					 a[i]->controller)) IoAdapters[a[i]->controller - 1] = NULL;
-				remove_adapter_proc(a[i]);
-			}
-			diva_os_free(0, a[i]);
-		}
-	}
-}
-
-/* --------------------------------------------------------------------------
-   Create diva PCI adapter and init internal adapter structures
-   -------------------------------------------------------------------------- */
-static void *divas_create_pci_card(int handle, void *pci_dev_handle)
-{
-	diva_supported_cards_info_t *pI = &divas_supported_cards[handle];
-	diva_os_spin_lock_magic_t old_irql;
-	diva_os_xdi_adapter_t *a;
-
-	DBG_LOG(("found %d-%s", pI->CardOrdinal, CardProperties[pI->CardOrdinal].Name))
-
-		if (!(a = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) {
-			DBG_ERR(("A: can't alloc adapter"));
-			return NULL;
-		}
-
-	memset(a, 0x00, sizeof(*a));
-
-	a->CardIndex = handle;
-	a->CardOrdinal = pI->CardOrdinal;
-	a->Bus = DIVAS_XDI_ADAPTER_BUS_PCI;
-	a->xdi_adapter.cardType = a->CardOrdinal;
-	a->resources.pci.bus = diva_os_get_pci_bus(pci_dev_handle);
-	a->resources.pci.func = diva_os_get_pci_func(pci_dev_handle);
-	a->resources.pci.hdev = pci_dev_handle;
-
-	/*
-	  Add master adapter first, so slave adapters will receive higher
-	  numbers as master adapter
-	*/
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
-	list_add_tail(&a->link, &adapter_queue);
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
-
-	if ((*(pI->init_card)) (a)) {
-		diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
-		list_del(&a->link);
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
-		diva_os_free(0, a);
-		DBG_ERR(("A: can't get adapter resources"));
-		return NULL;
-	}
-
-	return (a);
-}
-
-/* --------------------------------------------------------------------------
-   Called on driver unload FINIT, finit, Unload
-   -------------------------------------------------------------------------- */
-void divasa_xdi_driver_unload(void)
-{
-	diva_os_xdi_adapter_t *a;
-
-	while ((a = get_and_remove_from_queue())) {
-		if (a->interface.cleanup_adapter_proc) {
-			(*(a->interface.cleanup_adapter_proc)) (a);
-		}
-		if (a->controller) {
-			IoAdapters[a->controller - 1] = NULL;
-			remove_adapter_proc(a);
-		}
-		diva_os_free(0, a);
-	}
-	diva_os_destroy_spin_lock(&adapter_lock, "adapter");
-}
-
-/*
-**  Receive and process command from user mode utility
-*/
-void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length, void *mptr,
-			    divas_xdi_copy_from_user_fn_t cp_fn)
-{
-	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
-	diva_os_xdi_adapter_t *a = NULL;
-	diva_os_spin_lock_magic_t old_irql;
-	struct list_head *tmp;
-
-	if (length < sizeof(diva_xdi_um_cfg_cmd_t)) {
-		DBG_ERR(("A: A(?) open, msg too small (%d < %d)",
-			 length, sizeof(diva_xdi_um_cfg_cmd_t)))
-			return NULL;
-	}
-	if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
-		DBG_ERR(("A: A(?) open, write error"))
-			return NULL;
-	}
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
-	list_for_each(tmp, &adapter_queue) {
-		a = list_entry(tmp, diva_os_xdi_adapter_t, link);
-		if (a->controller == (int)msg->adapter)
-			break;
-		a = NULL;
-	}
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
-
-	if (!a) {
-		DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
-			}
-
-	return (a);
-}
-
-/*
-**  Easy cleanup mailbox status
-*/
-void diva_xdi_close_adapter(void *adapter, void *os_handle)
-{
-	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
-
-	a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
-	if (a->xdi_mbox.data) {
-		diva_os_free(0, a->xdi_mbox.data);
-		a->xdi_mbox.data = NULL;
-	}
-}
-
-int
-diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-	       int length, void *mptr,
-	       divas_xdi_copy_from_user_fn_t cp_fn)
-{
-	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
-	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
-	void *data;
-
-	if (a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY) {
-		DBG_ERR(("A: A(%d) write, mbox busy", a->controller))
-			return (-1);
-	}
-
-	if (length < sizeof(diva_xdi_um_cfg_cmd_t)) {
-		DBG_ERR(("A: A(%d) write, message too small (%d < %d)",
-			 a->controller, length,
-			 sizeof(diva_xdi_um_cfg_cmd_t)))
-			return (-3);
-	}
-
-	if (!(data = diva_os_malloc(0, length))) {
-		DBG_ERR(("A: A(%d) write, ENOMEM", a->controller))
-			return (-2);
-	}
-
-	if (msg) {
-		*(diva_xdi_um_cfg_cmd_t *)data = *msg;
-		length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
-				   src + sizeof(*msg), length - sizeof(*msg));
-	} else {
-		length = (*cp_fn) (os_handle, data, src, length);
-	}
-	if (length > 0) {
-		if ((*(a->interface.cmd_proc))
-		    (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
-			length = -3;
-		}
-	} else {
-		DBG_ERR(("A: A(%d) write error (%d)", a->controller,
-			 length))
-			}
-
-	diva_os_free(0, data);
-
-	return (length);
-}
-
-/*
-**  Write answers to user mode utility, if any
-*/
-int
-diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
-	      int max_length, divas_xdi_copy_to_user_fn_t cp_fn)
-{
-	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
-	int ret;
-
-	if (!(a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY)) {
-		DBG_ERR(("A: A(%d) rx mbox empty", a->controller))
-			return (-1);
-	}
-	if (!a->xdi_mbox.data) {
-		a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
-		DBG_ERR(("A: A(%d) rx ENOMEM", a->controller))
-			return (-2);
-	}
-
-	if (max_length < a->xdi_mbox.data_length) {
-		DBG_ERR(("A: A(%d) rx buffer too short(%d < %d)",
-			 a->controller, max_length,
-			 a->xdi_mbox.data_length))
-			return (-3);
-	}
-
-	ret = (*cp_fn) (os_handle, dst, a->xdi_mbox.data,
-			a->xdi_mbox.data_length);
-	if (ret > 0) {
-		diva_os_free(0, a->xdi_mbox.data);
-		a->xdi_mbox.data = NULL;
-		a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
-	}
-
-	return (ret);
-}
-
-
-irqreturn_t diva_os_irq_wrapper(int irq, void *context)
-{
-	diva_os_xdi_adapter_t *a = context;
-	diva_xdi_clear_interrupts_proc_t clear_int_proc;
-
-	if (!a || !a->xdi_adapter.diva_isr_handler)
-		return IRQ_NONE;
-
-	if ((clear_int_proc = a->clear_interrupts_proc)) {
-		(*clear_int_proc) (a);
-		a->clear_interrupts_proc = NULL;
-		return IRQ_HANDLED;
-	}
-
-	(*(a->xdi_adapter.diva_isr_handler)) (&a->xdi_adapter);
-	return IRQ_HANDLED;
-}
-
-static void diva_init_request_array(void)
-{
-	Requests[0] = DivaIdiRequest0;
-	Requests[1] = DivaIdiRequest1;
-	Requests[2] = DivaIdiRequest2;
-	Requests[3] = DivaIdiRequest3;
-	Requests[4] = DivaIdiRequest4;
-	Requests[5] = DivaIdiRequest5;
-	Requests[6] = DivaIdiRequest6;
-	Requests[7] = DivaIdiRequest7;
-	Requests[8] = DivaIdiRequest8;
-	Requests[9] = DivaIdiRequest9;
-	Requests[10] = DivaIdiRequest10;
-	Requests[11] = DivaIdiRequest11;
-	Requests[12] = DivaIdiRequest12;
-	Requests[13] = DivaIdiRequest13;
-	Requests[14] = DivaIdiRequest14;
-	Requests[15] = DivaIdiRequest15;
-	Requests[16] = DivaIdiRequest16;
-	Requests[17] = DivaIdiRequest17;
-	Requests[18] = DivaIdiRequest18;
-	Requests[19] = DivaIdiRequest19;
-	Requests[20] = DivaIdiRequest20;
-	Requests[21] = DivaIdiRequest21;
-	Requests[22] = DivaIdiRequest22;
-	Requests[23] = DivaIdiRequest23;
-	Requests[24] = DivaIdiRequest24;
-	Requests[25] = DivaIdiRequest25;
-	Requests[26] = DivaIdiRequest26;
-	Requests[27] = DivaIdiRequest27;
-	Requests[28] = DivaIdiRequest28;
-	Requests[29] = DivaIdiRequest29;
-	Requests[30] = DivaIdiRequest30;
-	Requests[31] = DivaIdiRequest31;
-}
-
-void diva_xdi_display_adapter_features(int card)
-{
-	dword features;
-	if (!card || ((card - 1) >= MAX_ADAPTER) || !IoAdapters[card - 1]) {
-		return;
-	}
-	card--;
-	features = IoAdapters[card]->Properties.Features;
-
-	DBG_LOG(("FEATURES FOR ADAPTER: %d", card + 1))
-		DBG_LOG((" DI_FAX3          :  %s",
-			 (features & DI_FAX3) ? "Y" : "N"))
-		DBG_LOG((" DI_MODEM         :  %s",
-			 (features & DI_MODEM) ? "Y" : "N"))
-		DBG_LOG((" DI_POST          :  %s",
-			 (features & DI_POST) ? "Y" : "N"))
-		DBG_LOG((" DI_V110          :  %s",
-			 (features & DI_V110) ? "Y" : "N"))
-		DBG_LOG((" DI_V120          :  %s",
-			 (features & DI_V120) ? "Y" : "N"))
-		DBG_LOG((" DI_POTS          :  %s",
-			 (features & DI_POTS) ? "Y" : "N"))
-		DBG_LOG((" DI_CODEC         :  %s",
-			 (features & DI_CODEC) ? "Y" : "N"))
-		DBG_LOG((" DI_MANAGE        :  %s",
-			 (features & DI_MANAGE) ? "Y" : "N"))
-		DBG_LOG((" DI_V_42          :  %s",
-			 (features & DI_V_42) ? "Y" : "N"))
-		DBG_LOG((" DI_EXTD_FAX      :  %s",
-			 (features & DI_EXTD_FAX) ? "Y" : "N"))
-		DBG_LOG((" DI_AT_PARSER     :  %s",
-			 (features & DI_AT_PARSER) ? "Y" : "N"))
-		DBG_LOG((" DI_VOICE_OVER_IP :  %s",
-			 (features & DI_VOICE_OVER_IP) ? "Y" : "N"))
-		}
-
-void diva_add_slave_adapter(diva_os_xdi_adapter_t *a)
-{
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add_slave");
-	list_add_tail(&a->link, &adapter_queue);
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add_slave");
-}
-
-int diva_card_read_xlog(diva_os_xdi_adapter_t *a)
-{
-	diva_get_xlog_t *req;
-	byte *data;
-
-	if (!a->xdi_adapter.Initialized || !a->xdi_adapter.DIRequest) {
-		return (-1);
-	}
-	if (!(data = diva_os_malloc(0, sizeof(struct mi_pc_maint)))) {
-		return (-1);
-	}
-	memset(data, 0x00, sizeof(struct mi_pc_maint));
-
-	if (!(req = diva_os_malloc(0, sizeof(*req)))) {
-		diva_os_free(0, data);
-		return (-1);
-	}
-	req->command = 0x0400;
-	req->req = LOG;
-	req->rc = 0x00;
-
-	(*(a->xdi_adapter.DIRequest)) (&a->xdi_adapter, (ENTITY *) req);
-
-	if (!req->rc || req->req) {
-		diva_os_free(0, data);
-		diva_os_free(0, req);
-		return (-1);
-	}
-
-	memcpy(data, &req->req, sizeof(struct mi_pc_maint));
-
-	diva_os_free(0, req);
-
-	a->xdi_mbox.data_length = sizeof(struct mi_pc_maint);
-	a->xdi_mbox.data = data;
-	a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-
-	return (0);
-}
-
-void xdiFreeFile(void *handle)
-{
-}
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
deleted file mode 100644
index 1ad7665..0000000
--- a/drivers/isdn/hardware/eicon/diva.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: diva.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef __DIVA_XDI_OS_PART_H__
-#define __DIVA_XDI_OS_PART_H__
-
-
-int divasa_xdi_driver_entry(void);
-void divasa_xdi_driver_unload(void);
-void *diva_driver_add_card(void *pdev, unsigned long CardOrdinal);
-void diva_driver_remove_card(void *pdiva);
-
-typedef int (*divas_xdi_copy_to_user_fn_t) (void *os_handle, void __user *dst,
-					    const void *src, int length);
-
-typedef int (*divas_xdi_copy_from_user_fn_t) (void *os_handle, void *dst,
-					      const void __user *src, int length);
-
-int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
-		  int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
-
-int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
-		   int length, void *msg,
-		   divas_xdi_copy_from_user_fn_t cp_fn);
-
-void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
-			    int length, void *msg,
-			    divas_xdi_copy_from_user_fn_t cp_fn);
-
-void diva_xdi_close_adapter(void *adapter, void *os_handle);
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
deleted file mode 100644
index 60e7925..0000000
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* $Id: diva_didd.c,v 1.13.6.4 2005/02/11 19:40:25 armin Exp $
- *
- * DIDD Interface module for Eicon active cards.
- *
- * Functions are in dadapter.c
- *
- * Copyright 2002-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2002-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <net/net_namespace.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "dadapter.h"
-#include "divasync.h"
-#include "did_vers.h"
-
-static char *main_revision = "$Revision: 1.13.6.4 $";
-
-static char *DRIVERNAME =
-	"Eicon DIVA - DIDD table (http://www.melware.net)";
-static char *DRIVERLNAME = "divadidd";
-char *DRIVERRELEASE_DIDD = "2.0";
-
-MODULE_DESCRIPTION("DIDD table driver for diva drivers");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
-MODULE_LICENSE("GPL");
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern int diddfunc_init(void);
-extern void diddfunc_finit(void);
-
-extern void DIVA_DIDD_Read(void *, int);
-
-static struct proc_dir_entry *proc_didd;
-struct proc_dir_entry *proc_net_eicon = NULL;
-
-EXPORT_SYMBOL(DIVA_DIDD_Read);
-EXPORT_SYMBOL(proc_net_eicon);
-
-static char *getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "1.0";
-	return rev;
-}
-
-static int divadidd_proc_show(struct seq_file *m, void *v)
-{
-	char tmprev[32];
-
-	strcpy(tmprev, main_revision);
-	seq_printf(m, "%s\n", DRIVERNAME);
-	seq_printf(m, "name     : %s\n", DRIVERLNAME);
-	seq_printf(m, "release  : %s\n", DRIVERRELEASE_DIDD);
-	seq_printf(m, "build    : %s(%s)\n",
-		   diva_didd_common_code_build, DIVA_BUILD);
-	seq_printf(m, "revision : %s\n", getrev(tmprev));
-
-	return 0;
-}
-
-static int __init create_proc(void)
-{
-	proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
-
-	if (proc_net_eicon) {
-		proc_didd = proc_create_single(DRIVERLNAME, S_IRUGO,
-				proc_net_eicon, divadidd_proc_show);
-		return (1);
-	}
-	return (0);
-}
-
-static void remove_proc(void)
-{
-	remove_proc_entry(DRIVERLNAME, proc_net_eicon);
-	remove_proc_entry("eicon", init_net.proc_net);
-}
-
-static int __init divadidd_init(void)
-{
-	char tmprev[32];
-	int ret = 0;
-
-	printk(KERN_INFO "%s\n", DRIVERNAME);
-	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_DIDD);
-	strcpy(tmprev, main_revision);
-	printk("%s  Build:%s(%s)\n", getrev(tmprev),
-	       diva_didd_common_code_build, DIVA_BUILD);
-
-	if (!create_proc()) {
-		printk(KERN_ERR "%s: could not create proc entry\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!diddfunc_init()) {
-		printk(KERN_ERR "%s: failed to connect to DIDD.\n",
-		       DRIVERLNAME);
-#ifdef MODULE
-		remove_proc();
-#endif
-		ret = -EIO;
-		goto out;
-	}
-
-out:
-	return (ret);
-}
-
-static void __exit divadidd_exit(void)
-{
-	diddfunc_finit();
-	remove_proc();
-	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divadidd_init);
-module_exit(divadidd_exit);
diff --git a/drivers/isdn/hardware/eicon/diva_dma.c b/drivers/isdn/hardware/eicon/diva_dma.c
deleted file mode 100644
index 217b6aa..0000000
--- a/drivers/isdn/hardware/eicon/diva_dma.c
+++ /dev/null
@@ -1,94 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "diva_dma.h"
-/*
-  Every entry has length of PAGE_SIZE
-  and represents one single physical page
-*/
-struct _diva_dma_map_entry {
-	int busy;
-	dword phys_bus_addr;  /* 32bit address as seen by the card */
-	void *local_ram_addr; /* local address as seen by the host */
-	void *addr_handle;    /* handle uset to free allocated memory */
-};
-/*
-  Create local mapping structure and init it to default state
-*/
-struct _diva_dma_map_entry *diva_alloc_dma_map(void *os_context, int nentries) {
-	diva_dma_map_entry_t *pmap = diva_os_malloc(0, sizeof(*pmap) * (nentries + 1));
-	if (pmap)
-		memset(pmap, 0, sizeof(*pmap) * (nentries + 1));
-	return pmap;
-}
-/*
-  Free local map (context should be freed before) if any
-*/
-void diva_free_dma_mapping(struct _diva_dma_map_entry *pmap) {
-	if (pmap) {
-		diva_os_free(0, pmap);
-	}
-}
-/*
-  Set information saved on the map entry
-*/
-void diva_init_dma_map_entry(struct _diva_dma_map_entry *pmap,
-			     int nr, void *virt, dword phys,
-			     void *addr_handle) {
-	pmap[nr].phys_bus_addr  = phys;
-	pmap[nr].local_ram_addr = virt;
-	pmap[nr].addr_handle    = addr_handle;
-}
-/*
-  Allocate one single entry in the map
-*/
-int diva_alloc_dma_map_entry(struct _diva_dma_map_entry *pmap) {
-	int i;
-	for (i = 0; (pmap && pmap[i].local_ram_addr); i++) {
-		if (!pmap[i].busy) {
-			pmap[i].busy = 1;
-			return (i);
-		}
-	}
-	return (-1);
-}
-/*
-  Free one single entry in the map
-*/
-void diva_free_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr) {
-	pmap[nr].busy = 0;
-}
-/*
-  Get information saved on the map entry
-*/
-void diva_get_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr,
-			    void **pvirt, dword *pphys) {
-	*pphys = pmap[nr].phys_bus_addr;
-	*pvirt = pmap[nr].local_ram_addr;
-}
-void *diva_get_entry_handle(struct _diva_dma_map_entry *pmap, int nr) {
-	return (pmap[nr].addr_handle);
-}
diff --git a/drivers/isdn/hardware/eicon/diva_dma.h b/drivers/isdn/hardware/eicon/diva_dma.h
deleted file mode 100644
index d32c91b..0000000
--- a/drivers/isdn/hardware/eicon/diva_dma.h
+++ /dev/null
@@ -1,48 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DMA_MAPPING_IFC_H__
-#define __DIVA_DMA_MAPPING_IFC_H__
-typedef struct _diva_dma_map_entry  diva_dma_map_entry_t;
-struct _diva_dma_map_entry *diva_alloc_dma_map(void *os_context, int nentries);
-void diva_init_dma_map_entry(struct _diva_dma_map_entry *pmap,
-			     int nr, void *virt, dword phys,
-			     void *addr_handle);
-int diva_alloc_dma_map_entry(struct _diva_dma_map_entry *pmap);
-void diva_free_dma_map_entry(struct _diva_dma_map_entry *pmap, int entry);
-void diva_get_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr,
-			    void **pvirt, dword *pphys);
-void diva_free_dma_mapping(struct _diva_dma_map_entry *pmap);
-/*
-  Functionality to be implemented by OS wrapper
-  and running in process context
-*/
-void diva_init_dma_map(void *hdev,
-		       struct _diva_dma_map_entry **ppmap,
-		       int nentries);
-void diva_free_dma_map(void *hdev,
-		       struct _diva_dma_map_entry *pmap);
-void *diva_get_entry_handle(struct _diva_dma_map_entry *pmap, int nr);
-#endif
diff --git a/drivers/isdn/hardware/eicon/diva_pci.h b/drivers/isdn/hardware/eicon/diva_pci.h
deleted file mode 100644
index 7ef5db9..0000000
--- a/drivers/isdn/hardware/eicon/diva_pci.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: diva_pci.h,v 1.6 2003/01/04 15:29:45 schindler Exp $ */
-
-#ifndef __DIVA_PCI_INTERFACE_H__
-#define __DIVA_PCI_INTERFACE_H__
-
-void __iomem *divasa_remap_pci_bar(diva_os_xdi_adapter_t *a,
-				   int id,
-				   unsigned long bar,
-				   unsigned long area_length);
-void divasa_unmap_pci_bar(void __iomem *bar);
-unsigned long divasa_get_pci_irq(unsigned char bus,
-				 unsigned char func, void *pci_dev_handle);
-unsigned long divasa_get_pci_bar(unsigned char bus,
-				 unsigned char func,
-				 int bar, void *pci_dev_handle);
-byte diva_os_get_pci_bus(void *pci_dev_handle);
-byte diva_os_get_pci_func(void *pci_dev_handle);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
deleted file mode 100644
index c4868a0..0000000
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ /dev/null
@@ -1,1350 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-/*#define DEBUG */
-
-#include <linux/types.h>
-
-#define IMPLEMENT_DTMF 1
-#define IMPLEMENT_LINE_INTERCONNECT2 1
-#define IMPLEMENT_ECHO_CANCELLER 1
-#define IMPLEMENT_RTP 1
-#define IMPLEMENT_T38 1
-#define IMPLEMENT_FAX_SUB_SEP_PWD 1
-#define IMPLEMENT_V18 1
-#define IMPLEMENT_DTMF_TONE 1
-#define IMPLEMENT_PIAFS 1
-#define IMPLEMENT_FAX_PAPER_FORMATS 1
-#define IMPLEMENT_VOWN 1
-#define IMPLEMENT_CAPIDTMF 1
-#define IMPLEMENT_FAX_NONSTANDARD 1
-#define VSWITCH_SUPPORT 1
-
-
-#define IMPLEMENT_LINE_INTERCONNECT 0
-#define IMPLEMENT_MARKED_OK_AFTER_FC 1
-
-#include "capidtmf.h"
-
-/*------------------------------------------------------------------*/
-/* Common API internal definitions                                  */
-/*------------------------------------------------------------------*/
-
-#define MAX_APPL 240
-#define MAX_NCCI           127
-
-#define MSG_IN_QUEUE_SIZE  ((4096 + 3) & 0xfffc)  /* must be multiple of 4 */
-
-
-#define MSG_IN_OVERHEAD    sizeof(APPL   *)
-
-#define MAX_NL_CHANNEL     255
-#define MAX_DATA_B3        8
-#define MAX_DATA_ACK       MAX_DATA_B3
-#define MAX_MULTI_IE       6
-#define MAX_MSG_SIZE       256
-#define MAX_MSG_PARMS      10
-#define MAX_CPN_MASK_SIZE  16
-#define MAX_MSN_CONFIG     10
-#define EXT_CONTROLLER     0x80
-#define CODEC              0x01
-#define CODEC_PERMANENT    0x02
-#define ADV_VOICE          0x03
-#define MAX_CIP_TYPES      5  /* kind of CIP types for group optimization */
-
-#define FAX_CONNECT_INFO_BUFFER_SIZE  256
-#define NCPI_BUFFER_SIZE              256
-
-#define MAX_CHANNELS_PER_PLCI         8
-#define MAX_INTERNAL_COMMAND_LEVELS   4
-#define INTERNAL_REQ_BUFFER_SIZE      272
-
-#define INTERNAL_IND_BUFFER_SIZE      768
-
-#define DTMF_PARAMETER_BUFFER_SIZE    12
-#define ADV_VOICE_COEF_BUFFER_SIZE    50
-
-#define LI_PLCI_B_QUEUE_ENTRIES       256
-
-
-
-typedef struct _APPL APPL;
-typedef struct _PLCI PLCI;
-typedef struct _NCCI NCCI;
-typedef struct _DIVA_CAPI_ADAPTER DIVA_CAPI_ADAPTER;
-typedef struct _DATA_B3_DESC DATA_B3_DESC;
-typedef struct _DATA_ACK_DESC DATA_ACK_DESC;
-typedef struct manufacturer_profile_s MANUFACTURER_PROFILE;
-typedef struct fax_ncpi_s FAX_NCPI;
-typedef struct api_parse_s API_PARSE;
-typedef struct api_save_s API_SAVE;
-typedef struct msn_config_s MSN_CONFIG;
-typedef struct msn_config_max_s MSN_CONFIG_MAX;
-typedef struct msn_ld_s MSN_LD;
-
-struct manufacturer_profile_s {
-	dword private_options;
-	dword rtp_primary_payloads;
-	dword rtp_additional_payloads;
-};
-
-struct fax_ncpi_s {
-	word options;
-	word format;
-};
-
-struct msn_config_s {
-	byte msn[MAX_CPN_MASK_SIZE];
-};
-
-struct msn_config_max_s {
-	MSN_CONFIG    msn_conf[MAX_MSN_CONFIG];
-};
-
-struct msn_ld_s {
-	dword low;
-	dword high;
-};
-
-struct api_parse_s {
-	word          length;
-	byte *info;
-};
-
-struct api_save_s {
-	API_PARSE     parms[MAX_MSG_PARMS + 1];
-	byte          info[MAX_MSG_SIZE];
-};
-
-struct _DATA_B3_DESC {
-	word          Handle;
-	word          Number;
-	word          Flags;
-	word          Length;
-	void *P;
-};
-
-struct _DATA_ACK_DESC {
-	word          Handle;
-	word          Number;
-};
-
-typedef void (*t_std_internal_command)(dword Id, PLCI *plci, byte Rc);
-
-/************************************************************************/
-/* Don't forget to adapt dos.asm after changing the _APPL structure!!!! */
-struct _APPL {
-	word          Id;
-	word          NullCREnable;
-	word          CDEnable;
-	dword         S_Handle;
-
-
-
-
-
-
-	LIST_ENTRY    s_function;
-	dword         s_context;
-	word          s_count;
-	APPL *s_next;
-	byte *xbuffer_used;
-	void **xbuffer_internal;
-	void **xbuffer_ptr;
-
-
-
-
-
-
-	byte *queue;
-	word          queue_size;
-	word          queue_free;
-	word          queue_read;
-	word          queue_write;
-	word          queue_signal;
-	byte          msg_lost;
-	byte          appl_flags;
-	word          Number;
-
-	word          MaxBuffer;
-	byte          MaxNCCI;
-	byte          MaxNCCIData;
-	word          MaxDataLength;
-	word          NCCIDataFlowCtrlTimer;
-	byte *ReceiveBuffer;
-	word *DataNCCI;
-	word *DataFlags;
-};
-
-
-struct _PLCI {
-	ENTITY        Sig;
-	ENTITY        NL;
-	word          RNum;
-	word          RFlags;
-	BUFFERS       RData[2];
-	BUFFERS       XData[1];
-	BUFFERS       NData[2];
-
-	DIVA_CAPI_ADAPTER   *adapter;
-	APPL      *appl;
-	PLCI      *relatedPTYPLCI;
-	byte          Id;
-	byte          State;
-	byte          sig_req;
-	byte          nl_req;
-	byte          SuppState;
-	byte          channels;
-	byte          tel;
-	byte          B1_resource;
-	byte          B2_prot;
-	byte          B3_prot;
-
-	word          command;
-	word          m_command;
-	word          internal_command;
-	word          number;
-	word          req_in_start;
-	word          req_in;
-	word          req_out;
-	word          msg_in_write_pos;
-	word          msg_in_read_pos;
-	word          msg_in_wrap_pos;
-
-	void *data_sent_ptr;
-	byte          data_sent;
-	byte          send_disc;
-	byte          sig_global_req;
-	byte          sig_remove_id;
-	byte          nl_global_req;
-	byte          nl_remove_id;
-	byte          b_channel;
-	byte          adv_nl;
-	byte          manufacturer;
-	byte          call_dir;
-	byte          hook_state;
-	byte          spoofed_msg;
-	byte          ptyState;
-	byte          cr_enquiry;
-	word          hangup_flow_ctrl_timer;
-
-	word          ncci_ring_list;
-	byte          inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI];
-	t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS];
-	DECLARE_BITMAP(c_ind_mask_table, MAX_APPL);
-	DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL);
-	byte          RBuffer[200];
-	dword         msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)];
-	API_SAVE      saved_msg;
-	API_SAVE      B_protocol;
-	byte          fax_connect_info_length;
-	byte          fax_connect_info_buffer[FAX_CONNECT_INFO_BUFFER_SIZE];
-	byte          fax_edata_ack_length;
-	word          nsf_control_bits;
-	byte          ncpi_state;
-	byte          ncpi_buffer[NCPI_BUFFER_SIZE];
-
-	byte          internal_req_buffer[INTERNAL_REQ_BUFFER_SIZE];
-	byte          internal_ind_buffer[INTERNAL_IND_BUFFER_SIZE + 3];
-	dword         requested_options_conn;
-	dword         requested_options;
-	word          B1_facilities;
-	API_SAVE   *adjust_b_parms_msg;
-	word          adjust_b_facilities;
-	word          adjust_b_command;
-	word          adjust_b_ncci;
-	word          adjust_b_mode;
-	word          adjust_b_state;
-	byte          adjust_b_restore;
-
-	byte          dtmf_rec_active;
-	word          dtmf_rec_pulse_ms;
-	word          dtmf_rec_pause_ms;
-	byte          dtmf_send_requests;
-	word          dtmf_send_pulse_ms;
-	word          dtmf_send_pause_ms;
-	word          dtmf_cmd;
-	word          dtmf_msg_number_queue[8];
-	byte          dtmf_parameter_length;
-	byte          dtmf_parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE];
-
-
-	t_capidtmf_state capidtmf_state;
-
-
-	byte          li_bchannel_id;    /* BRI: 1..2, PRI: 1..32 */
-	byte          li_channel_bits;
-	byte          li_notify_update;
-	word          li_cmd;
-	word          li_write_command;
-	word          li_write_channel;
-	word          li_plci_b_write_pos;
-	word          li_plci_b_read_pos;
-	word          li_plci_b_req_pos;
-	dword         li_plci_b_queue[LI_PLCI_B_QUEUE_ENTRIES];
-
-
-	word          ec_cmd;
-	word          ec_idi_options;
-	word          ec_tail_length;
-
-
-	byte          tone_last_indication_code;
-
-	byte          vswitchstate;
-	byte          vsprot;
-	byte          vsprotdialect;
-	byte          notifiedcall; /* Flag if it is a spoofed call */
-
-	int           rx_dma_descriptor;
-	dword         rx_dma_magic;
-};
-
-
-struct _NCCI {
-	byte          data_out;
-	byte          data_pending;
-	byte          data_ack_out;
-	byte          data_ack_pending;
-	DATA_B3_DESC  DBuffer[MAX_DATA_B3];
-	DATA_ACK_DESC DataAck[MAX_DATA_ACK];
-};
-
-
-struct _DIVA_CAPI_ADAPTER {
-	IDI_CALL      request;
-	byte          Id;
-	byte          max_plci;
-	byte          max_listen;
-	byte          listen_active;
-	PLCI      *plci;
-	byte          ch_ncci[MAX_NL_CHANNEL + 1];
-	byte          ncci_ch[MAX_NCCI + 1];
-	byte          ncci_plci[MAX_NCCI + 1];
-	byte          ncci_state[MAX_NCCI + 1];
-	byte          ncci_next[MAX_NCCI + 1];
-	NCCI          ncci[MAX_NCCI + 1];
-
-	byte          ch_flow_control[MAX_NL_CHANNEL + 1];  /* Used by XON protocol */
-	byte          ch_flow_control_pending;
-	byte          ch_flow_plci[MAX_NL_CHANNEL + 1];
-	int           last_flow_control_ch;
-
-	dword         Info_Mask[MAX_APPL];
-	dword         CIP_Mask[MAX_APPL];
-
-	dword         Notification_Mask[MAX_APPL];
-	PLCI      *codec_listen[MAX_APPL];
-	dword         requested_options_table[MAX_APPL];
-	API_PROFILE   profile;
-	MANUFACTURER_PROFILE man_profile;
-	dword         manufacturer_features;
-
-	byte          AdvCodecFLAG;
-	PLCI      *AdvCodecPLCI;
-	PLCI      *AdvSignalPLCI;
-	APPL      *AdvSignalAppl;
-	byte          TelOAD[23];
-	byte          TelOSA[23];
-	byte          scom_appl_disable;
-	PLCI      *automatic_lawPLCI;
-	byte          automatic_law;
-	byte          u_law;
-
-	byte          adv_voice_coef_length;
-	byte          adv_voice_coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE];
-
-	byte          li_pri;
-	byte          li_channels;
-	word          li_base;
-
-	byte adapter_disabled;
-	byte group_optimization_enabled; /* use application groups if enabled */
-	dword sdram_bar;
-	byte flag_dynamic_l1_down; /* for hunt groups:down layer 1 if no appl present*/
-	byte FlowControlIdTable[256];
-	byte FlowControlSkipTable[256];
-	void *os_card; /* pointer to associated OS dependent adapter structure */
-};
-
-
-/*------------------------------------------------------------------*/
-/* Application flags                                                */
-/*------------------------------------------------------------------*/
-
-#define APPL_FLAG_OLD_LI_SPEC           0x01
-#define APPL_FLAG_PRIV_EC_SPEC          0x02
-
-
-/*------------------------------------------------------------------*/
-/* API parameter definitions                                        */
-/*------------------------------------------------------------------*/
-
-#define X75_TTX         1       /* x.75 for ttx                     */
-#define TRF             2       /* transparent with hdlc framing    */
-#define TRF_IN          3       /* transparent with hdlc fr. inc.   */
-#define SDLC            4       /* sdlc, sna layer-2                */
-#define X75_BTX         5       /* x.75 for btx                     */
-#define LAPD            6       /* lapd (Q.921)                     */
-#define X25_L2          7       /* x.25 layer-2                     */
-#define V120_L2         8       /* V.120 layer-2 protocol           */
-#define V42_IN          9       /* V.42 layer-2 protocol, incoming */
-#define V42            10       /* V.42 layer-2 protocol            */
-#define MDM_ATP        11       /* AT Parser built in the L2        */
-#define X75_V42BIS     12       /* ISO7776 (X.75 SLP) modified to support V.42 bis compression */
-#define RTPL2_IN       13       /* RTP layer-2 protocol, incoming  */
-#define RTPL2          14       /* RTP layer-2 protocol             */
-#define V120_V42BIS    15       /* V.120 layer-2 protocol supporting V.42 bis compression */
-
-#define T70NL           1
-#define X25PLP          2
-#define T70NLX          3
-#define TRANSPARENT_NL  4
-#define ISO8208         5
-#define T30             6
-
-
-/*------------------------------------------------------------------*/
-/* FAX interface to IDI                                             */
-/*------------------------------------------------------------------*/
-
-#define CAPI_MAX_HEAD_LINE_SPACE        89
-#define CAPI_MAX_DATE_TIME_LENGTH       18
-
-#define T30_MAX_STATION_ID_LENGTH       20
-#define T30_MAX_SUBADDRESS_LENGTH       20
-#define T30_MAX_PASSWORD_LENGTH         20
-
-typedef struct t30_info_s T30_INFO;
-struct t30_info_s {
-	byte          code;
-	byte          rate_div_2400;
-	byte          resolution;
-	byte          data_format;
-	byte          pages_low;
-	byte          pages_high;
-	byte          operating_mode;
-	byte          control_bits_low;
-	byte          control_bits_high;
-	byte          feature_bits_low;
-	byte          feature_bits_high;
-	byte          recording_properties;
-	byte          universal_6;
-	byte          universal_7;
-	byte          station_id_len;
-	byte          head_line_len;
-	byte          station_id[T30_MAX_STATION_ID_LENGTH];
-/* byte          head_line[];      */
-/* byte          sub_sep_length;   */
-/* byte          sub_sep_field[];  */
-/* byte          pwd_length;       */
-/* byte          pwd_field[];      */
-/* byte          nsf_info_length;   */
-/* byte          nsf_info_field[];  */
-};
-
-
-#define T30_RESOLUTION_R8_0385          0x00
-#define T30_RESOLUTION_R8_0770_OR_200   0x01
-#define T30_RESOLUTION_R8_1540          0x02
-#define T30_RESOLUTION_R16_1540_OR_400  0x04
-#define T30_RESOLUTION_R4_0385_OR_100   0x08
-#define T30_RESOLUTION_300_300          0x10
-#define T30_RESOLUTION_INCH_BASED       0x40
-#define T30_RESOLUTION_METRIC_BASED     0x80
-
-#define T30_RECORDING_WIDTH_ISO_A4      0
-#define T30_RECORDING_WIDTH_ISO_B4      1
-#define T30_RECORDING_WIDTH_ISO_A3      2
-#define T30_RECORDING_WIDTH_COUNT       3
-
-#define T30_RECORDING_LENGTH_ISO_A4     0
-#define T30_RECORDING_LENGTH_ISO_B4     1
-#define T30_RECORDING_LENGTH_UNLIMITED  2
-#define T30_RECORDING_LENGTH_COUNT      3
-
-#define T30_MIN_SCANLINE_TIME_00_00_00  0
-#define T30_MIN_SCANLINE_TIME_05_05_05  1
-#define T30_MIN_SCANLINE_TIME_10_05_05  2
-#define T30_MIN_SCANLINE_TIME_10_10_10  3
-#define T30_MIN_SCANLINE_TIME_20_10_10  4
-#define T30_MIN_SCANLINE_TIME_20_20_20  5
-#define T30_MIN_SCANLINE_TIME_40_20_20  6
-#define T30_MIN_SCANLINE_TIME_40_40_40  7
-#define T30_MIN_SCANLINE_TIME_RES_8     8
-#define T30_MIN_SCANLINE_TIME_RES_9     9
-#define T30_MIN_SCANLINE_TIME_RES_10    10
-#define T30_MIN_SCANLINE_TIME_10_10_05  11
-#define T30_MIN_SCANLINE_TIME_20_10_05  12
-#define T30_MIN_SCANLINE_TIME_20_20_10  13
-#define T30_MIN_SCANLINE_TIME_40_20_10  14
-#define T30_MIN_SCANLINE_TIME_40_40_20  15
-#define T30_MIN_SCANLINE_TIME_COUNT     16
-
-#define T30_DATA_FORMAT_SFF             0
-#define T30_DATA_FORMAT_ASCII           1
-#define T30_DATA_FORMAT_NATIVE          2
-#define T30_DATA_FORMAT_COUNT           3
-
-
-#define T30_OPERATING_MODE_STANDARD     0
-#define T30_OPERATING_MODE_CLASS2       1
-#define T30_OPERATING_MODE_CLASS1       2
-#define T30_OPERATING_MODE_CAPI         3
-#define T30_OPERATING_MODE_CAPI_NEG     4
-#define T30_OPERATING_MODE_COUNT        5
-
-/* EDATA transmit messages */
-#define EDATA_T30_DIS         0x01
-#define EDATA_T30_FTT         0x02
-#define EDATA_T30_MCF         0x03
-#define EDATA_T30_PARAMETERS  0x04
-
-/* EDATA receive messages */
-#define EDATA_T30_DCS         0x81
-#define EDATA_T30_TRAIN_OK    0x82
-#define EDATA_T30_EOP         0x83
-#define EDATA_T30_MPS         0x84
-#define EDATA_T30_EOM         0x85
-#define EDATA_T30_DTC         0x86
-#define EDATA_T30_PAGE_END    0x87   /* Indicates end of page data. Reserved, but not implemented ! */
-#define EDATA_T30_EOP_CAPI    0x88
-
-
-#define T30_SUCCESS                        0
-#define T30_ERR_NO_DIS_RECEIVED            1
-#define T30_ERR_TIMEOUT_NO_RESPONSE        2
-#define T30_ERR_RETRY_NO_RESPONSE          3
-#define T30_ERR_TOO_MANY_REPEATS           4
-#define T30_ERR_UNEXPECTED_MESSAGE         5
-#define T30_ERR_UNEXPECTED_DCN             6
-#define T30_ERR_DTC_UNSUPPORTED            7
-#define T30_ERR_ALL_RATES_FAILED           8
-#define T30_ERR_TOO_MANY_TRAINS            9
-#define T30_ERR_RECEIVE_CORRUPTED          10
-#define T30_ERR_UNEXPECTED_DISC            11
-#define T30_ERR_APPLICATION_DISC           12
-#define T30_ERR_INCOMPATIBLE_DIS           13
-#define T30_ERR_INCOMPATIBLE_DCS           14
-#define T30_ERR_TIMEOUT_NO_COMMAND         15
-#define T30_ERR_RETRY_NO_COMMAND           16
-#define T30_ERR_TIMEOUT_COMMAND_TOO_LONG   17
-#define T30_ERR_TIMEOUT_RESPONSE_TOO_LONG  18
-#define T30_ERR_NOT_IDENTIFIED             19
-#define T30_ERR_SUPERVISORY_TIMEOUT        20
-#define T30_ERR_TOO_LONG_SCAN_LINE         21
-/* #define T30_ERR_RETRY_NO_PAGE_AFTER_MPS    22 */
-#define T30_ERR_RETRY_NO_PAGE_RECEIVED     23
-#define T30_ERR_RETRY_NO_DCS_AFTER_FTT     24
-#define T30_ERR_RETRY_NO_DCS_AFTER_EOM     25
-#define T30_ERR_RETRY_NO_DCS_AFTER_MPS     26
-#define T30_ERR_RETRY_NO_DCN_AFTER_MCF     27
-#define T30_ERR_RETRY_NO_DCN_AFTER_RTN     28
-#define T30_ERR_RETRY_NO_CFR               29
-#define T30_ERR_RETRY_NO_MCF_AFTER_EOP     30
-#define T30_ERR_RETRY_NO_MCF_AFTER_EOM     31
-#define T30_ERR_RETRY_NO_MCF_AFTER_MPS     32
-#define T30_ERR_SUB_SEP_UNSUPPORTED        33
-#define T30_ERR_PWD_UNSUPPORTED            34
-#define T30_ERR_SUB_SEP_PWD_UNSUPPORTED    35
-#define T30_ERR_INVALID_COMMAND_FRAME      36
-#define T30_ERR_UNSUPPORTED_PAGE_CODING    37
-#define T30_ERR_INVALID_PAGE_CODING        38
-#define T30_ERR_INCOMPATIBLE_PAGE_CONFIG   39
-#define T30_ERR_TIMEOUT_FROM_APPLICATION   40
-#define T30_ERR_V34FAX_NO_REACTION_ON_MARK 41
-#define T30_ERR_V34FAX_TRAINING_TIMEOUT    42
-#define T30_ERR_V34FAX_UNEXPECTED_V21      43
-#define T30_ERR_V34FAX_PRIMARY_CTS_ON      44
-#define T30_ERR_V34FAX_TURNAROUND_POLLING  45
-#define T30_ERR_V34FAX_V8_INCOMPATIBILITY  46
-
-
-#define T30_CONTROL_BIT_DISABLE_FINE       0x0001
-#define T30_CONTROL_BIT_ENABLE_ECM         0x0002
-#define T30_CONTROL_BIT_ECM_64_BYTES       0x0004
-#define T30_CONTROL_BIT_ENABLE_2D_CODING   0x0008
-#define T30_CONTROL_BIT_ENABLE_T6_CODING   0x0010
-#define T30_CONTROL_BIT_ENABLE_UNCOMPR     0x0020
-#define T30_CONTROL_BIT_ACCEPT_POLLING     0x0040
-#define T30_CONTROL_BIT_REQUEST_POLLING    0x0080
-#define T30_CONTROL_BIT_MORE_DOCUMENTS     0x0100
-#define T30_CONTROL_BIT_ACCEPT_SUBADDRESS  0x0200
-#define T30_CONTROL_BIT_ACCEPT_SEL_POLLING 0x0400
-#define T30_CONTROL_BIT_ACCEPT_PASSWORD    0x0800
-#define T30_CONTROL_BIT_ENABLE_V34FAX      0x1000
-#define T30_CONTROL_BIT_EARLY_CONNECT      0x2000
-
-#define T30_CONTROL_BIT_ALL_FEATURES  (T30_CONTROL_BIT_ENABLE_ECM | T30_CONTROL_BIT_ENABLE_2D_CODING |   T30_CONTROL_BIT_ENABLE_T6_CODING | T30_CONTROL_BIT_ENABLE_UNCOMPR |   T30_CONTROL_BIT_ENABLE_V34FAX)
-
-#define T30_FEATURE_BIT_FINE               0x0001
-#define T30_FEATURE_BIT_ECM                0x0002
-#define T30_FEATURE_BIT_ECM_64_BYTES       0x0004
-#define T30_FEATURE_BIT_2D_CODING          0x0008
-#define T30_FEATURE_BIT_T6_CODING          0x0010
-#define T30_FEATURE_BIT_UNCOMPR_ENABLED    0x0020
-#define T30_FEATURE_BIT_POLLING            0x0040
-#define T30_FEATURE_BIT_MORE_DOCUMENTS     0x0100
-#define T30_FEATURE_BIT_V34FAX             0x1000
-
-
-#define T30_NSF_CONTROL_BIT_ENABLE_NSF     0x0001
-#define T30_NSF_CONTROL_BIT_RAW_INFO       0x0002
-#define T30_NSF_CONTROL_BIT_NEGOTIATE_IND  0x0004
-#define T30_NSF_CONTROL_BIT_NEGOTIATE_RESP 0x0008
-
-#define T30_NSF_ELEMENT_NSF_FIF            0x00
-#define T30_NSF_ELEMENT_NSC_FIF            0x01
-#define T30_NSF_ELEMENT_NSS_FIF            0x02
-#define T30_NSF_ELEMENT_COMPANY_NAME       0x03
-
-
-/*------------------------------------------------------------------*/
-/* Analog modem definitions                                         */
-/*------------------------------------------------------------------*/
-
-typedef struct async_s ASYNC_FORMAT;
-struct async_s {
-	unsigned pe:1;
-	unsigned parity:2;
-	unsigned spare:2;
-	unsigned stp:1;
-	unsigned ch_len:2;   /* 3th octett in CAI */
-};
-
-
-/*------------------------------------------------------------------*/
-/* PLCI/NCCI states                                                 */
-/*------------------------------------------------------------------*/
-
-#define IDLE                    0
-#define OUTG_CON_PENDING        1
-#define INC_CON_PENDING         2
-#define INC_CON_ALERT           3
-#define INC_CON_ACCEPT          4
-#define INC_ACT_PENDING         5
-#define LISTENING               6
-#define CONNECTED               7
-#define OUTG_DIS_PENDING        8
-#define INC_DIS_PENDING         9
-#define LOCAL_CONNECT           10
-#define INC_RES_PENDING         11
-#define OUTG_RES_PENDING        12
-#define SUSPENDING              13
-#define ADVANCED_VOICE_SIG      14
-#define ADVANCED_VOICE_NOSIG    15
-#define RESUMING                16
-#define INC_CON_CONNECTED_ALERT 17
-#define OUTG_REJ_PENDING        18
-
-
-/*------------------------------------------------------------------*/
-/* auxiliary states for supplementary services                     */
-/*------------------------------------------------------------------*/
-
-#define IDLE                0
-#define HOLD_REQUEST        1
-#define HOLD_INDICATE       2
-#define CALL_HELD           3
-#define RETRIEVE_REQUEST    4
-#define RETRIEVE_INDICATION 5
-
-/*------------------------------------------------------------------*/
-/* Capi IE + Msg types                                              */
-/*------------------------------------------------------------------*/
-#define ESC_CAUSE        0x800 | CAU        /* Escape cause element */
-#define ESC_MSGTYPE      0x800 | MSGTYPEIE  /* Escape message type  */
-#define ESC_CHI          0x800 | CHI        /* Escape channel id    */
-#define ESC_LAW          0x800 | BC         /* Escape law info      */
-#define ESC_CR           0x800 | CRIE       /* Escape CallReference */
-#define ESC_PROFILE      0x800 | PROFILEIE  /* Escape profile       */
-#define ESC_SSEXT        0x800 | SSEXTIE    /* Escape Supplem. Serv.*/
-#define ESC_VSWITCH      0x800 | VSWITCHIE  /* Escape VSwitch       */
-#define CST              0x14               /* Call State i.e.      */
-#define PI               0x1E               /* Progress Indicator   */
-#define NI               0x27               /* Notification Ind     */
-#define CONN_NR          0x4C               /* Connected Number     */
-#define CONG_RNR         0xBF               /* Congestion RNR       */
-#define CONG_RR          0xB0               /* Congestion RR        */
-#define RESERVED         0xFF               /* Res. for future use  */
-#define ON_BOARD_CODEC   0x02               /* external controller  */
-#define HANDSET          0x04               /* Codec+Handset(Pro11) */
-#define HOOK_SUPPORT     0x01               /* activate Hook signal */
-#define SCR              0x7a               /* unscreened number    */
-
-#define HOOK_OFF_REQ     0x9001             /* internal conn req    */
-#define HOOK_ON_REQ      0x9002             /* internal disc req    */
-#define SUSPEND_REQ      0x9003             /* internal susp req    */
-#define RESUME_REQ       0x9004             /* internal resume req  */
-#define USELAW_REQ       0x9005             /* internal law    req  */
-#define LISTEN_SIG_ASSIGN_PEND  0x9006
-#define PERM_LIST_REQ    0x900a             /* permanent conn DCE   */
-#define C_HOLD_REQ       0x9011
-#define C_RETRIEVE_REQ   0x9012
-#define C_NCR_FAC_REQ    0x9013
-#define PERM_COD_ASSIGN  0x9014
-#define PERM_COD_CALL    0x9015
-#define PERM_COD_HOOK    0x9016
-#define PERM_COD_CONN_PEND 0x9017           /* wait for connect_con */
-#define PTY_REQ_PEND     0x9018
-#define CD_REQ_PEND      0x9019
-#define CF_START_PEND    0x901a
-#define CF_STOP_PEND     0x901b
-#define ECT_REQ_PEND     0x901c
-#define GETSERV_REQ_PEND 0x901d
-#define BLOCK_PLCI       0x901e
-#define INTERR_NUMBERS_REQ_PEND         0x901f
-#define INTERR_DIVERSION_REQ_PEND       0x9020
-#define MWI_ACTIVATE_REQ_PEND           0x9021
-#define MWI_DEACTIVATE_REQ_PEND         0x9022
-#define SSEXT_REQ_COMMAND               0x9023
-#define SSEXT_NC_REQ_COMMAND            0x9024
-#define START_L1_SIG_ASSIGN_PEND        0x9025
-#define REM_L1_SIG_ASSIGN_PEND          0x9026
-#define CONF_BEGIN_REQ_PEND             0x9027
-#define CONF_ADD_REQ_PEND               0x9028
-#define CONF_SPLIT_REQ_PEND             0x9029
-#define CONF_DROP_REQ_PEND              0x902a
-#define CONF_ISOLATE_REQ_PEND           0x902b
-#define CONF_REATTACH_REQ_PEND          0x902c
-#define VSWITCH_REQ_PEND                0x902d
-#define GET_MWI_STATE                   0x902e
-#define CCBS_REQUEST_REQ_PEND           0x902f
-#define CCBS_DEACTIVATE_REQ_PEND        0x9030
-#define CCBS_INTERROGATE_REQ_PEND       0x9031
-
-#define NO_INTERNAL_COMMAND             0
-#define DTMF_COMMAND_1                  1
-#define DTMF_COMMAND_2                  2
-#define DTMF_COMMAND_3                  3
-#define MIXER_COMMAND_1                 4
-#define MIXER_COMMAND_2                 5
-#define MIXER_COMMAND_3                 6
-#define ADV_VOICE_COMMAND_CONNECT_1     7
-#define ADV_VOICE_COMMAND_CONNECT_2     8
-#define ADV_VOICE_COMMAND_CONNECT_3     9
-#define ADV_VOICE_COMMAND_DISCONNECT_1  10
-#define ADV_VOICE_COMMAND_DISCONNECT_2  11
-#define ADV_VOICE_COMMAND_DISCONNECT_3  12
-#define ADJUST_B_RESTORE_1              13
-#define ADJUST_B_RESTORE_2              14
-#define RESET_B3_COMMAND_1              15
-#define SELECT_B_COMMAND_1              16
-#define FAX_CONNECT_INFO_COMMAND_1      17
-#define FAX_CONNECT_INFO_COMMAND_2      18
-#define FAX_ADJUST_B23_COMMAND_1        19
-#define FAX_ADJUST_B23_COMMAND_2        20
-#define EC_COMMAND_1                    21
-#define EC_COMMAND_2                    22
-#define EC_COMMAND_3                    23
-#define RTP_CONNECT_B3_REQ_COMMAND_1    24
-#define RTP_CONNECT_B3_REQ_COMMAND_2    25
-#define RTP_CONNECT_B3_REQ_COMMAND_3    26
-#define RTP_CONNECT_B3_RES_COMMAND_1    27
-#define RTP_CONNECT_B3_RES_COMMAND_2    28
-#define RTP_CONNECT_B3_RES_COMMAND_3    29
-#define HOLD_SAVE_COMMAND_1             30
-#define RETRIEVE_RESTORE_COMMAND_1      31
-#define FAX_DISCONNECT_COMMAND_1        32
-#define FAX_DISCONNECT_COMMAND_2        33
-#define FAX_DISCONNECT_COMMAND_3        34
-#define FAX_EDATA_ACK_COMMAND_1         35
-#define FAX_EDATA_ACK_COMMAND_2         36
-#define FAX_CONNECT_ACK_COMMAND_1       37
-#define FAX_CONNECT_ACK_COMMAND_2       38
-#define STD_INTERNAL_COMMAND_COUNT      39
-
-#define UID              0x2d               /* User Id for Mgmt      */
-
-#define CALL_DIR_OUT             0x01       /* call direction of initial call */
-#define CALL_DIR_IN              0x02
-#define CALL_DIR_ORIGINATE       0x04       /* DTE/DCE direction according to */
-#define CALL_DIR_ANSWER          0x08       /*   state of B-Channel Operation */
-#define CALL_DIR_FORCE_OUTG_NL   0x10       /* for RESET_B3 reconnect, after DISC_B3... */
-
-#define AWAITING_MANUF_CON 0x80             /* command spoofing flags */
-#define SPOOFING_REQUIRED  0xff
-#define AWAITING_SELECT_B  0xef
-
-/*------------------------------------------------------------------*/
-/* B_CTRL / DSP_CTRL                                                */
-/*------------------------------------------------------------------*/
-
-#define DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS     0x01
-#define DSP_CTRL_SET_BCHANNEL_PASSIVATION_BRI   0x02
-#define DSP_CTRL_SET_DTMF_PARAMETERS            0x03
-
-#define MANUFACTURER_FEATURE_SLAVE_CODEC          0x00000001L
-#define MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS   0x00000002L
-#define MANUFACTURER_FEATURE_HARDDTMF             0x00000004L
-#define MANUFACTURER_FEATURE_SOFTDTMF_SEND        0x00000008L
-#define MANUFACTURER_FEATURE_DTMF_PARAMETERS      0x00000010L
-#define MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE     0x00000020L
-#define MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD      0x00000040L
-#define MANUFACTURER_FEATURE_V18                  0x00000080L
-#define MANUFACTURER_FEATURE_MIXER_CH_CH          0x00000100L
-#define MANUFACTURER_FEATURE_MIXER_CH_PC          0x00000200L
-#define MANUFACTURER_FEATURE_MIXER_PC_CH          0x00000400L
-#define MANUFACTURER_FEATURE_MIXER_PC_PC          0x00000800L
-#define MANUFACTURER_FEATURE_ECHO_CANCELLER       0x00001000L
-#define MANUFACTURER_FEATURE_RTP                  0x00002000L
-#define MANUFACTURER_FEATURE_T38                  0x00004000L
-#define MANUFACTURER_FEATURE_TRANSP_DELIVERY_CONF 0x00008000L
-#define MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL  0x00010000L
-#define MANUFACTURER_FEATURE_OOB_CHANNEL          0x00020000L
-#define MANUFACTURER_FEATURE_IN_BAND_CHANNEL      0x00040000L
-#define MANUFACTURER_FEATURE_IN_BAND_FEATURE      0x00080000L
-#define MANUFACTURER_FEATURE_PIAFS                0x00100000L
-#define MANUFACTURER_FEATURE_DTMF_TONE            0x00200000L
-#define MANUFACTURER_FEATURE_FAX_PAPER_FORMATS    0x00400000L
-#define MANUFACTURER_FEATURE_OK_FC_LABEL          0x00800000L
-#define MANUFACTURER_FEATURE_VOWN                 0x01000000L
-#define MANUFACTURER_FEATURE_XCONNECT             0x02000000L
-#define MANUFACTURER_FEATURE_DMACONNECT           0x04000000L
-#define MANUFACTURER_FEATURE_AUDIO_TAP            0x08000000L
-#define MANUFACTURER_FEATURE_FAX_NONSTANDARD      0x10000000L
-
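
The MANUFACTURER_FEATURE_* values are single-bit flags collected in one dword-wide capability mask and are therefore tested with a bitwise AND. A stand-alone sketch, assuming a 32-bit dword typedef and a made-up capability value:

#include <stdio.h>

typedef unsigned int dword;   /* assumption: 32-bit, as in the driver's own typedefs */

#define MANUFACTURER_FEATURE_HARDDTMF          0x00000004L
#define MANUFACTURER_FEATURE_ECHO_CANCELLER    0x00001000L
#define MANUFACTURER_FEATURE_RTP               0x00002000L

int main(void)
{
	/* hypothetical capability mask as an adapter might report it */
	dword features = MANUFACTURER_FEATURE_HARDDTMF
		| MANUFACTURER_FEATURE_ECHO_CANCELLER;

	printf("hard DTMF      : %s\n",
	       (features & MANUFACTURER_FEATURE_HARDDTMF) ? "yes" : "no");
	printf("echo canceller : %s\n",
	       (features & MANUFACTURER_FEATURE_ECHO_CANCELLER) ? "yes" : "no");
	printf("RTP            : %s\n",
	       (features & MANUFACTURER_FEATURE_RTP) ? "yes" : "no");
	return 0;
}
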
-/*------------------------------------------------------------------*/
-/* DTMF interface to IDI                                            */
-/*------------------------------------------------------------------*/
-
-
-#define DTMF_DIGIT_TONE_LOW_GROUP_697_HZ        0x00
-#define DTMF_DIGIT_TONE_LOW_GROUP_770_HZ        0x01
-#define DTMF_DIGIT_TONE_LOW_GROUP_852_HZ        0x02
-#define DTMF_DIGIT_TONE_LOW_GROUP_941_HZ        0x03
-#define DTMF_DIGIT_TONE_LOW_GROUP_MASK          0x03
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1209_HZ      0x00
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1336_HZ      0x04
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1477_HZ      0x08
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1633_HZ      0x0c
-#define DTMF_DIGIT_TONE_HIGH_GROUP_MASK         0x0c
-#define DTMF_DIGIT_TONE_CODE_0                  0x07
-#define DTMF_DIGIT_TONE_CODE_1                  0x00
-#define DTMF_DIGIT_TONE_CODE_2                  0x04
-#define DTMF_DIGIT_TONE_CODE_3                  0x08
-#define DTMF_DIGIT_TONE_CODE_4                  0x01
-#define DTMF_DIGIT_TONE_CODE_5                  0x05
-#define DTMF_DIGIT_TONE_CODE_6                  0x09
-#define DTMF_DIGIT_TONE_CODE_7                  0x02
-#define DTMF_DIGIT_TONE_CODE_8                  0x06
-#define DTMF_DIGIT_TONE_CODE_9                  0x0a
-#define DTMF_DIGIT_TONE_CODE_STAR               0x03
-#define DTMF_DIGIT_TONE_CODE_HASHMARK           0x0b
-#define DTMF_DIGIT_TONE_CODE_A                  0x0c
-#define DTMF_DIGIT_TONE_CODE_B                  0x0d
-#define DTMF_DIGIT_TONE_CODE_C                  0x0e
-#define DTMF_DIGIT_TONE_CODE_D                  0x0f
-
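
Each DTMF_DIGIT_TONE_CODE_* value packs the low-group selector into bits 0-1 and the high-group selector into bits 2-3, as the two *_MASK defines above indicate. A stand-alone sketch that maps a tone code back to its frequency pair; the frequency tables simply restate the *_HZ defines:

#include <stdio.h>

#define DTMF_DIGIT_TONE_LOW_GROUP_MASK   0x03
#define DTMF_DIGIT_TONE_HIGH_GROUP_MASK  0x0c
#define DTMF_DIGIT_TONE_CODE_5           0x05

int main(void)
{
	static const int low_hz[4]  = { 697, 770, 852, 941 };
	static const int high_hz[4] = { 1209, 1336, 1477, 1633 };
	unsigned code = DTMF_DIGIT_TONE_CODE_5;

	/* bits 0-1 select the low-group tone, bits 2-3 the high-group tone */
	int low  = low_hz[code & DTMF_DIGIT_TONE_LOW_GROUP_MASK];
	int high = high_hz[(code & DTMF_DIGIT_TONE_HIGH_GROUP_MASK) >> 2];

	printf("digit '5' -> %d Hz / %d Hz\n", low, high);   /* 770 / 1336 */
	return 0;
}
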
-#define DTMF_UDATA_REQUEST_SEND_DIGITS            16
-#define DTMF_UDATA_REQUEST_ENABLE_RECEIVER        17
-#define DTMF_UDATA_REQUEST_DISABLE_RECEIVER       18
-#define DTMF_UDATA_INDICATION_DIGITS_SENT         16
-#define DTMF_UDATA_INDICATION_DIGITS_RECEIVED     17
-#define DTMF_UDATA_INDICATION_MODEM_CALLING_TONE  18
-#define DTMF_UDATA_INDICATION_FAX_CALLING_TONE    19
-#define DTMF_UDATA_INDICATION_ANSWER_TONE         20
-
-#define UDATA_REQUEST_MIXER_TAP_DATA        27
-#define UDATA_INDICATION_MIXER_TAP_DATA     27
-
-#define DTMF_LISTEN_ACTIVE_FLAG        0x01
-#define DTMF_SEND_DIGIT_FLAG           0x01
-
-
-/*------------------------------------------------------------------*/
-/* Mixer interface to IDI                                           */
-/*------------------------------------------------------------------*/
-
-
-#define LI2_FLAG_PCCONNECT_A_B 0x40000000
-#define LI2_FLAG_PCCONNECT_B_A 0x80000000
-
-#define MIXER_BCHANNELS_BRI    2
-#define MIXER_IC_CHANNELS_BRI  MIXER_BCHANNELS_BRI
-#define MIXER_IC_CHANNEL_BASE  MIXER_BCHANNELS_BRI
-#define MIXER_CHANNELS_BRI     (MIXER_BCHANNELS_BRI + MIXER_IC_CHANNELS_BRI)
-#define MIXER_CHANNELS_PRI     32
-
-typedef struct li_config_s LI_CONFIG;
-
-struct xconnect_card_address_s {
-	dword low;
-	dword high;
-};
-
-struct xconnect_transfer_address_s {
-	struct xconnect_card_address_s card_address;
-	dword offset;
-};
-
-struct li_config_s {
-	DIVA_CAPI_ADAPTER   *adapter;
-	PLCI   *plci;
-	struct xconnect_transfer_address_s send_b;
-	struct xconnect_transfer_address_s send_pc;
-	byte   *flag_table;  /* dword aligned and sized */
-	byte   *coef_table;  /* dword aligned and sized */
-	byte channel;
-	byte curchnl;
-	byte chflags;
-};
-
-extern LI_CONFIG   *li_config_table;
-extern word li_total_channels;
-
-#define LI_CHANNEL_INVOLVED        0x01
-#define LI_CHANNEL_ACTIVE          0x02
-#define LI_CHANNEL_TX_DATA         0x04
-#define LI_CHANNEL_RX_DATA         0x08
-#define LI_CHANNEL_CONFERENCE      0x10
-#define LI_CHANNEL_ADDRESSES_SET   0x80
-
-#define LI_CHFLAG_MONITOR          0x01
-#define LI_CHFLAG_MIX              0x02
-#define LI_CHFLAG_LOOP             0x04
-
-#define LI_FLAG_INTERCONNECT       0x01
-#define LI_FLAG_MONITOR            0x02
-#define LI_FLAG_MIX                0x04
-#define LI_FLAG_PCCONNECT          0x08
-#define LI_FLAG_CONFERENCE         0x10
-#define LI_FLAG_ANNOUNCEMENT       0x20
-
-#define LI_COEF_CH_CH              0x01
-#define LI_COEF_CH_PC              0x02
-#define LI_COEF_PC_CH              0x04
-#define LI_COEF_PC_PC              0x08
-#define LI_COEF_CH_CH_SET          0x10
-#define LI_COEF_CH_PC_SET          0x20
-#define LI_COEF_PC_CH_SET          0x40
-#define LI_COEF_PC_PC_SET          0x80
-
-#define LI_REQ_SILENT_UPDATE       0xffff
-
-#define LI_PLCI_B_LAST_FLAG        ((dword) 0x80000000L)
-#define LI_PLCI_B_DISC_FLAG        ((dword) 0x40000000L)
-#define LI_PLCI_B_SKIP_FLAG        ((dword) 0x20000000L)
-#define LI_PLCI_B_FLAG_MASK        ((dword) 0xe0000000L)
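
LI_PLCI_B_FLAG_MASK reserves the top three bits of a dword for per-entry flags, leaving the lower bits for the identifier itself. A stand-alone sketch of splitting such an entry; the dword typedef and the sample identifier are assumptions:

#include <stdio.h>

typedef unsigned int dword;                      /* assumption: 32-bit */

#define LI_PLCI_B_LAST_FLAG  ((dword) 0x80000000L)
#define LI_PLCI_B_DISC_FLAG  ((dword) 0x40000000L)
#define LI_PLCI_B_FLAG_MASK  ((dword) 0xe0000000L)

int main(void)
{
	/* hypothetical entry: identifier 0x1234 with "last" and "disconnect" set */
	dword entry = 0x1234 | LI_PLCI_B_LAST_FLAG | LI_PLCI_B_DISC_FLAG;

	dword id    = entry & ~LI_PLCI_B_FLAG_MASK;  /* strip the flag bits */
	dword flags = entry &  LI_PLCI_B_FLAG_MASK;

	printf("id=0x%x last=%d disc=%d\n", id,
	       (flags & LI_PLCI_B_LAST_FLAG) != 0,
	       (flags & LI_PLCI_B_DISC_FLAG) != 0);
	return 0;
}
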
-
-#define UDATA_REQUEST_SET_MIXER_COEFS_BRI       24
-#define UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC  25
-#define UDATA_REQUEST_SET_MIXER_COEFS_PRI_ASYN  26
-#define UDATA_INDICATION_MIXER_COEFS_SET        24
-
-#define MIXER_FEATURE_ENABLE_TX_DATA        0x0001
-#define MIXER_FEATURE_ENABLE_RX_DATA        0x0002
-
-#define MIXER_COEF_LINE_CHANNEL_MASK        0x1f
-#define MIXER_COEF_LINE_FROM_PC_FLAG        0x20
-#define MIXER_COEF_LINE_TO_PC_FLAG          0x40
-#define MIXER_COEF_LINE_ROW_FLAG            0x80
-
-#define UDATA_REQUEST_XCONNECT_FROM         28
-#define UDATA_INDICATION_XCONNECT_FROM      28
-#define UDATA_REQUEST_XCONNECT_TO           29
-#define UDATA_INDICATION_XCONNECT_TO        29
-
-#define XCONNECT_CHANNEL_PORT_B             0x0000
-#define XCONNECT_CHANNEL_PORT_PC            0x8000
-#define XCONNECT_CHANNEL_PORT_MASK          0x8000
-#define XCONNECT_CHANNEL_NUMBER_MASK        0x7fff
-#define XCONNECT_CHANNEL_PORT_COUNT         2
-
-#define XCONNECT_SUCCESS           0x0000
-#define XCONNECT_ERROR             0x0001
-
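
An XCONNECT channel is addressed with a single word in which bit 15 selects the port (B side or PC side) and the low 15 bits carry the channel number. A stand-alone pack/unpack sketch; the word typedef and the sample channel are assumptions:

#include <stdio.h>

typedef unsigned short word;                     /* assumption: 16-bit */

#define XCONNECT_CHANNEL_PORT_PC      0x8000
#define XCONNECT_CHANNEL_PORT_MASK    0x8000
#define XCONNECT_CHANNEL_NUMBER_MASK  0x7fff

int main(void)
{
	/* hypothetical: PC-side channel 5 */
	word ch = (word) (XCONNECT_CHANNEL_PORT_PC | (5 & XCONNECT_CHANNEL_NUMBER_MASK));

	printf("port=%s number=%u\n",
	       (ch & XCONNECT_CHANNEL_PORT_MASK) ? "PC" : "B",
	       (unsigned) (ch & XCONNECT_CHANNEL_NUMBER_MASK));
	return 0;
}
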
-
-/*------------------------------------------------------------------*/
-/* Echo canceller interface to IDI                                  */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_ECHO_CANCELLER         0
-
-#define PRIV_SELECTOR_ECHO_CANCELLER   255
-
-#define EC_ENABLE_OPERATION            1
-#define EC_DISABLE_OPERATION           2
-#define EC_FREEZE_COEFFICIENTS         3
-#define EC_RESUME_COEFFICIENT_UPDATE   4
-#define EC_RESET_COEFFICIENTS          5
-
-#define EC_DISABLE_NON_LINEAR_PROCESSING     0x0001
-#define EC_DO_NOT_REQUIRE_REVERSALS          0x0002
-#define EC_DETECT_DISABLE_TONE               0x0004
-
-#define EC_SUCCESS                           0
-#define EC_UNSUPPORTED_OPERATION             1
-
-#define EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ   1
-#define EC_BYPASS_DUE_TO_REVERSED_2100HZ     2
-#define EC_BYPASS_RELEASED                   3
-
-#define DSP_CTRL_SET_LEC_PARAMETERS          0x05
-
-#define LEC_ENABLE_ECHO_CANCELLER            0x0001
-#define LEC_ENABLE_2100HZ_DETECTOR           0x0002
-#define LEC_REQUIRE_2100HZ_REVERSALS         0x0004
-#define LEC_MANUAL_DISABLE                   0x0008
-#define LEC_ENABLE_NONLINEAR_PROCESSING      0x0010
-#define LEC_FREEZE_COEFFICIENTS              0x0020
-#define LEC_RESET_COEFFICIENTS               0x8000
-
-#define LEC_MAX_SUPPORTED_TAIL_LENGTH        32
-
-#define LEC_UDATA_INDICATION_DISABLE_DETECT  9
-
-#define LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ  0x00
-#define LEC_DISABLE_TYPE_REVERSED_2100HZ     0x01
-#define LEC_DISABLE_RELEASED                 0x02
-
-
-/*------------------------------------------------------------------*/
-/* RTP interface to IDI                                             */
-/*------------------------------------------------------------------*/
-
-
-#define B1_RTP                  31
-#define B2_RTP                  31
-#define B3_RTP                  31
-
-#define PRIVATE_RTP                    1
-
-#define RTP_PRIM_PAYLOAD_PCMU_8000     0
-#define RTP_PRIM_PAYLOAD_1016_8000     1
-#define RTP_PRIM_PAYLOAD_G726_32_8000  2
-#define RTP_PRIM_PAYLOAD_GSM_8000      3
-#define RTP_PRIM_PAYLOAD_G723_8000     4
-#define RTP_PRIM_PAYLOAD_DVI4_8000     5
-#define RTP_PRIM_PAYLOAD_DVI4_16000    6
-#define RTP_PRIM_PAYLOAD_LPC_8000      7
-#define RTP_PRIM_PAYLOAD_PCMA_8000     8
-#define RTP_PRIM_PAYLOAD_G722_16000    9
-#define RTP_PRIM_PAYLOAD_QCELP_8000    12
-#define RTP_PRIM_PAYLOAD_G728_8000     14
-#define RTP_PRIM_PAYLOAD_G729_8000     18
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000   30
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000  31
-
-#define RTP_ADD_PAYLOAD_BASE           32
-#define RTP_ADD_PAYLOAD_RED            32
-#define RTP_ADD_PAYLOAD_CN_8000        33
-#define RTP_ADD_PAYLOAD_DTMF           34
-
-#define RTP_SUCCESS                         0
-#define RTP_ERR_SSRC_OR_PAYLOAD_CHANGE      1
-
-#define UDATA_REQUEST_RTP_RECONFIGURE       64
-#define UDATA_INDICATION_RTP_CHANGE         65
-#define BUDATA_REQUEST_QUERY_RTCP_REPORT    1
-#define BUDATA_INDICATION_RTCP_REPORT       1
-
-#define RTP_CONNECT_OPTION_DISC_ON_SSRC_CHANGE    0x00000001L
-#define RTP_CONNECT_OPTION_DISC_ON_PT_CHANGE      0x00000002L
-#define RTP_CONNECT_OPTION_DISC_ON_UNKNOWN_PT     0x00000004L
-#define RTP_CONNECT_OPTION_NO_SILENCE_TRANSMIT    0x00010000L
-
-#define RTP_PAYLOAD_OPTION_VOICE_ACTIVITY_DETECT  0x0001
-#define RTP_PAYLOAD_OPTION_DISABLE_POST_FILTER    0x0002
-#define RTP_PAYLOAD_OPTION_G723_LOW_CODING_RATE   0x0100
-
-#define RTP_PACKET_FILTER_IGNORE_UNKNOWN_SSRC     0x00000001L
-
-#define RTP_CHANGE_FLAG_SSRC_CHANGE               0x00000001L
-#define RTP_CHANGE_FLAG_PAYLOAD_TYPE_CHANGE       0x00000002L
-#define RTP_CHANGE_FLAG_UNKNOWN_PAYLOAD_TYPE      0x00000004L
-
-
-/*------------------------------------------------------------------*/
-/* T.38 interface to IDI                                            */
-/*------------------------------------------------------------------*/
-
-
-#define B1_T38                  30
-#define B2_T38                  30
-#define B3_T38                  30
-
-#define PRIVATE_T38                    2
-
-
-/*------------------------------------------------------------------*/
-/* PIAFS interface to IDI                                            */
-/*------------------------------------------------------------------*/
-
-
-#define B1_PIAFS                29
-#define B2_PIAFS                29
-
-#define PRIVATE_PIAFS           29
-
-/*
-  B2 configuration for PIAFS:
-  +---------------------+------+-----------------------------------------+
-  | PIAFS Protocol      | byte | Bit 1 - Protocol Speed                  |
-  | Speed configuration |      |         0 - 32K                         |
-  |                     |      |         1 - 64K (default)               |
-  |                     |      | Bit 2 - Variable Protocol Speed         |
-  |                     |      |         0 - Speed is fixed              |
-  |                     |      |         1 - Speed is variable (default) |
-  +---------------------+------+-----------------------------------------+
-  | Direction           | word | Enable compression/decompression for    |
-  |                     |      | 0: all directions                       |
-  |                     |      | 1: disable outgoing data                |
-  |                     |      | 2: disable incoming data                |
-  |                     |      | 3: disable both directions (default)    |
-  +---------------------+------+-----------------------------------------+
-  | Number of code      | word | Parameter P1 of V.42bis in accordance   |
-  | words               |      | with V.42bis                            |
-  +---------------------+------+-----------------------------------------+
-  | Maximum String      | word | Parameter P2 of V.42bis in accordance   |
-  | Length              |      | with V.42bis                            |
-  +---------------------+------+-----------------------------------------+
-  | control (UDATA)     | byte | enables PIAFS control communication     |
-  | abilities           |      |                                         |
-  +---------------------+------+-----------------------------------------+
-*/
-#define PIAFS_UDATA_ABILITIES  0x80
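
Read literally, the table above describes a buffer of one speed byte, three words (direction, V.42bis P1, V.42bis P2) and one control-abilities byte. A stand-alone sketch that packs such a buffer; the little-endian word order, the bit numbering of the speed byte and the sample V.42bis values are assumptions made for illustration:

#include <stdio.h>

#define PIAFS_UDATA_ABILITIES  0x80

static void put_word(unsigned char *p, unsigned v)   /* little-endian word */
{
	p[0] = (unsigned char) (v & 0xff);
	p[1] = (unsigned char) ((v >> 8) & 0xff);
}

int main(void)
{
	unsigned char b2[8];
	size_t i;

	b2[0] = 0x03;                  /* assumed: bit 1 = 64K, bit 2 = variable speed */
	put_word(&b2[1], 0);           /* direction: compress in both directions       */
	put_word(&b2[3], 512);         /* V.42bis P1, number of code words (sample)    */
	put_word(&b2[5], 6);           /* V.42bis P2, maximum string length (sample)   */
	b2[7] = PIAFS_UDATA_ABILITIES; /* enable PIAFS control communication via UDATA */

	for (i = 0; i < sizeof(b2); i++)
		printf("%02x ", (unsigned) b2[i]);
	printf("\n");
	return 0;
}
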
-
-/*------------------------------------------------------------------*/
-/* FAX SUB/SEP/PWD extension                                        */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_SUB_SEP_PWD        3
-
-
-
-/*------------------------------------------------------------------*/
-/* V.18 extension                                                   */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_V18                    4
-
-
-
-/*------------------------------------------------------------------*/
-/* DTMF TONE extension                                              */
-/*------------------------------------------------------------------*/
-
-
-#define DTMF_GET_SUPPORTED_DETECT_CODES  0xf8
-#define DTMF_GET_SUPPORTED_SEND_CODES    0xf9
-#define DTMF_LISTEN_TONE_START           0xfa
-#define DTMF_LISTEN_TONE_STOP            0xfb
-#define DTMF_SEND_TONE                   0xfc
-#define DTMF_LISTEN_MF_START             0xfd
-#define DTMF_LISTEN_MF_STOP              0xfe
-#define DTMF_SEND_MF                     0xff
-
-#define DTMF_MF_DIGIT_TONE_CODE_1               0x10
-#define DTMF_MF_DIGIT_TONE_CODE_2               0x11
-#define DTMF_MF_DIGIT_TONE_CODE_3               0x12
-#define DTMF_MF_DIGIT_TONE_CODE_4               0x13
-#define DTMF_MF_DIGIT_TONE_CODE_5               0x14
-#define DTMF_MF_DIGIT_TONE_CODE_6               0x15
-#define DTMF_MF_DIGIT_TONE_CODE_7               0x16
-#define DTMF_MF_DIGIT_TONE_CODE_8               0x17
-#define DTMF_MF_DIGIT_TONE_CODE_9               0x18
-#define DTMF_MF_DIGIT_TONE_CODE_0               0x19
-#define DTMF_MF_DIGIT_TONE_CODE_K1              0x1a
-#define DTMF_MF_DIGIT_TONE_CODE_K2              0x1b
-#define DTMF_MF_DIGIT_TONE_CODE_KP              0x1c
-#define DTMF_MF_DIGIT_TONE_CODE_S1              0x1d
-#define DTMF_MF_DIGIT_TONE_CODE_ST              0x1e
-
-#define DTMF_DIGIT_CODE_COUNT                   16
-#define DTMF_MF_DIGIT_CODE_BASE                 DSP_DTMF_DIGIT_CODE_COUNT
-#define DTMF_MF_DIGIT_CODE_COUNT                15
-#define DTMF_TOTAL_DIGIT_CODE_COUNT             (DSP_MF_DIGIT_CODE_BASE + DSP_MF_DIGIT_CODE_COUNT)
-
-#define DTMF_TONE_DIGIT_BASE                    0x80
-
-#define DTMF_SIGNAL_NO_TONE                     (DTMF_TONE_DIGIT_BASE + 0)
-#define DTMF_SIGNAL_UNIDENTIFIED_TONE           (DTMF_TONE_DIGIT_BASE + 1)
-
-#define DTMF_SIGNAL_DIAL_TONE                   (DTMF_TONE_DIGIT_BASE + 2)
-#define DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE     (DTMF_TONE_DIGIT_BASE + 3)
-#define DTMF_SIGNAL_SPECIAL_DIAL_TONE           (DTMF_TONE_DIGIT_BASE + 4)   /* stutter dial tone */
-#define DTMF_SIGNAL_SECOND_DIAL_TONE            (DTMF_TONE_DIGIT_BASE + 5)
-#define DTMF_SIGNAL_RINGING_TONE                (DTMF_TONE_DIGIT_BASE + 6)
-#define DTMF_SIGNAL_SPECIAL_RINGING_TONE        (DTMF_TONE_DIGIT_BASE + 7)
-#define DTMF_SIGNAL_BUSY_TONE                   (DTMF_TONE_DIGIT_BASE + 8)
-#define DTMF_SIGNAL_CONGESTION_TONE             (DTMF_TONE_DIGIT_BASE + 9)   /* reorder tone */
-#define DTMF_SIGNAL_SPECIAL_INFORMATION_TONE    (DTMF_TONE_DIGIT_BASE + 10)
-#define DTMF_SIGNAL_COMFORT_TONE                (DTMF_TONE_DIGIT_BASE + 11)
-#define DTMF_SIGNAL_HOLD_TONE                   (DTMF_TONE_DIGIT_BASE + 12)
-#define DTMF_SIGNAL_RECORD_TONE                 (DTMF_TONE_DIGIT_BASE + 13)
-#define DTMF_SIGNAL_CALLER_WAITING_TONE         (DTMF_TONE_DIGIT_BASE + 14)
-#define DTMF_SIGNAL_CALL_WAITING_TONE           (DTMF_TONE_DIGIT_BASE + 15)
-#define DTMF_SIGNAL_PAY_TONE                    (DTMF_TONE_DIGIT_BASE + 16)
-#define DTMF_SIGNAL_POSITIVE_INDICATION_TONE    (DTMF_TONE_DIGIT_BASE + 17)
-#define DTMF_SIGNAL_NEGATIVE_INDICATION_TONE    (DTMF_TONE_DIGIT_BASE + 18)
-#define DTMF_SIGNAL_WARNING_TONE                (DTMF_TONE_DIGIT_BASE + 19)
-#define DTMF_SIGNAL_INTRUSION_TONE              (DTMF_TONE_DIGIT_BASE + 20)
-#define DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE   (DTMF_TONE_DIGIT_BASE + 21)
-#define DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE   (DTMF_TONE_DIGIT_BASE + 22)
-#define DTMF_SIGNAL_CPE_ALERTING_SIGNAL         (DTMF_TONE_DIGIT_BASE + 23)
-#define DTMF_SIGNAL_OFF_HOOK_WARNING_TONE       (DTMF_TONE_DIGIT_BASE + 24)
-
-#define DTMF_SIGNAL_INTERCEPT_TONE              (DTMF_TONE_DIGIT_BASE + 63)
-
-#define DTMF_SIGNAL_MODEM_CALLING_TONE          (DTMF_TONE_DIGIT_BASE + 64)
-#define DTMF_SIGNAL_FAX_CALLING_TONE            (DTMF_TONE_DIGIT_BASE + 65)
-#define DTMF_SIGNAL_ANSWER_TONE                 (DTMF_TONE_DIGIT_BASE + 66)
-#define DTMF_SIGNAL_REVERSED_ANSWER_TONE        (DTMF_TONE_DIGIT_BASE + 67)
-#define DTMF_SIGNAL_ANSAM_TONE                  (DTMF_TONE_DIGIT_BASE + 68)
-#define DTMF_SIGNAL_REVERSED_ANSAM_TONE         (DTMF_TONE_DIGIT_BASE + 69)
-#define DTMF_SIGNAL_BELL103_ANSWER_TONE         (DTMF_TONE_DIGIT_BASE + 70)
-#define DTMF_SIGNAL_FAX_FLAGS                   (DTMF_TONE_DIGIT_BASE + 71)
-#define DTMF_SIGNAL_G2_FAX_GROUP_ID             (DTMF_TONE_DIGIT_BASE + 72)
-#define DTMF_SIGNAL_HUMAN_SPEECH                (DTMF_TONE_DIGIT_BASE + 73)
-#define DTMF_SIGNAL_ANSWERING_MACHINE_390       (DTMF_TONE_DIGIT_BASE + 74)
-
-#define DTMF_MF_LISTEN_ACTIVE_FLAG     0x02
-#define DTMF_SEND_MF_FLAG              0x02
-#define DTMF_TONE_LISTEN_ACTIVE_FLAG   0x04
-#define DTMF_SEND_TONE_FLAG            0x04
-
-#define PRIVATE_DTMF_TONE              5
-
-
-/*------------------------------------------------------------------*/
-/* FAX paper format extension                                       */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_PAPER_FORMATS      6
-
-
-
-/*------------------------------------------------------------------*/
-/* V.OWN extension                                                  */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_VOWN                   7
-
-
-
-/*------------------------------------------------------------------*/
-/* FAX non-standard facilities extension                            */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_NONSTANDARD        8
-
-
-
-/*------------------------------------------------------------------*/
-/* Advanced voice                                                   */
-/*------------------------------------------------------------------*/
-
-#define ADV_VOICE_WRITE_ACTIVATION    0
-#define ADV_VOICE_WRITE_DEACTIVATION  1
-#define ADV_VOICE_WRITE_UPDATE        2
-
-#define ADV_VOICE_OLD_COEF_COUNT    6
-#define ADV_VOICE_NEW_COEF_BASE     (ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
-
-/*------------------------------------------------------------------*/
-/* B1 resource switching                                            */
-/*------------------------------------------------------------------*/
-
-#define B1_FACILITY_LOCAL  0x01
-#define B1_FACILITY_MIXER  0x02
-#define B1_FACILITY_DTMFX  0x04
-#define B1_FACILITY_DTMFR  0x08
-#define B1_FACILITY_VOICE  0x10
-#define B1_FACILITY_EC     0x20
-
-#define ADJUST_B_MODE_SAVE          0x0001
-#define ADJUST_B_MODE_REMOVE_L23    0x0002
-#define ADJUST_B_MODE_SWITCH_L1     0x0004
-#define ADJUST_B_MODE_NO_RESOURCE   0x0008
-#define ADJUST_B_MODE_ASSIGN_L23    0x0010
-#define ADJUST_B_MODE_USER_CONNECT  0x0020
-#define ADJUST_B_MODE_CONNECT       0x0040
-#define ADJUST_B_MODE_RESTORE       0x0080
-
-#define ADJUST_B_START                     0
-#define ADJUST_B_SAVE_MIXER_1              1
-#define ADJUST_B_SAVE_DTMF_1               2
-#define ADJUST_B_REMOVE_L23_1              3
-#define ADJUST_B_REMOVE_L23_2              4
-#define ADJUST_B_SAVE_EC_1                 5
-#define ADJUST_B_SAVE_DTMF_PARAMETER_1     6
-#define ADJUST_B_SAVE_VOICE_1              7
-#define ADJUST_B_SWITCH_L1_1               8
-#define ADJUST_B_SWITCH_L1_2               9
-#define ADJUST_B_RESTORE_VOICE_1           10
-#define ADJUST_B_RESTORE_VOICE_2           11
-#define ADJUST_B_RESTORE_DTMF_PARAMETER_1  12
-#define ADJUST_B_RESTORE_DTMF_PARAMETER_2  13
-#define ADJUST_B_RESTORE_EC_1              14
-#define ADJUST_B_RESTORE_EC_2              15
-#define ADJUST_B_ASSIGN_L23_1              16
-#define ADJUST_B_ASSIGN_L23_2              17
-#define ADJUST_B_CONNECT_1                 18
-#define ADJUST_B_CONNECT_2                 19
-#define ADJUST_B_CONNECT_3                 20
-#define ADJUST_B_CONNECT_4                 21
-#define ADJUST_B_RESTORE_DTMF_1            22
-#define ADJUST_B_RESTORE_DTMF_2            23
-#define ADJUST_B_RESTORE_MIXER_1           24
-#define ADJUST_B_RESTORE_MIXER_2           25
-#define ADJUST_B_RESTORE_MIXER_3           26
-#define ADJUST_B_RESTORE_MIXER_4           27
-#define ADJUST_B_RESTORE_MIXER_5           28
-#define ADJUST_B_RESTORE_MIXER_6           29
-#define ADJUST_B_RESTORE_MIXER_7           30
-#define ADJUST_B_END                       31
-
-/*------------------------------------------------------------------*/
-/* XON Protocol def's                                               */
-/*------------------------------------------------------------------*/
-#define N_CH_XOFF               0x01
-#define N_XON_SENT              0x02
-#define N_XON_REQ               0x04
-#define N_XON_CONNECT_IND       0x08
-#define N_RX_FLOW_CONTROL_MASK  0x3f
-#define N_OK_FC_PENDING         0x80
-#define N_TX_FLOW_CONTROL_MASK  0xc0
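
The masks above split the per-channel XON state byte into a receive-side part (low six bits) and a transmit-side part (top two bits). A minimal sketch of inspecting such a byte; the sample value is made up:

#include <stdio.h>

#define N_CH_XOFF               0x01
#define N_RX_FLOW_CONTROL_MASK  0x3f
#define N_OK_FC_PENDING         0x80

int main(void)
{
	unsigned char state = N_CH_XOFF | N_OK_FC_PENDING;   /* hypothetical state */

	printf("rx flow control active: %s\n",
	       (state & N_RX_FLOW_CONTROL_MASK) ? "yes" : "no");
	printf("N_OK_FC_PENDING set   : %s\n",
	       (state & N_OK_FC_PENDING) ? "yes" : "no");
	return 0;
}
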
-
-/*------------------------------------------------------------------*/
-/* NCPI state                                                       */
-/*------------------------------------------------------------------*/
-#define NCPI_VALID_CONNECT_B3_IND  0x01
-#define NCPI_VALID_CONNECT_B3_ACT  0x02
-#define NCPI_VALID_DISC_B3_IND     0x04
-#define NCPI_CONNECT_B3_ACT_SENT   0x08
-#define NCPI_NEGOTIATE_B3_SENT     0x10
-#define NCPI_MDM_CTS_ON_RECEIVED   0x40
-#define NCPI_MDM_DCD_ON_RECEIVED   0x80
-
-/*------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
deleted file mode 100644
index 5a95587..0000000
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/* $Id: divamnt.c,v 1.32.6.10 2005/02/11 19:40:25 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * Maint module
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "debug_if.h"
-
-static DEFINE_MUTEX(maint_mutex);
-static char *main_revision = "$Revision: 1.32.6.10 $";
-
-static int major;
-
-MODULE_DESCRIPTION("Maint driver for Eicon DIVA Server cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("DIVA card driver");
-MODULE_LICENSE("GPL");
-
-static int buffer_length = 128;
-module_param(buffer_length, int, 0);
-static unsigned long diva_dbg_mem = 0;
-module_param(diva_dbg_mem, ulong, 0);
-
-static char *DRIVERNAME =
-	"Eicon DIVA - MAINT module (http://www.melware.net)";
-static char *DRIVERLNAME = "diva_mnt";
-static char *DEVNAME = "DivasMAINT";
-char *DRIVERRELEASE_MNT = "2.0";
-
-static wait_queue_head_t msgwaitq;
-static unsigned long opened;
-
-extern int mntfunc_init(int *, void **, unsigned long);
-extern void mntfunc_finit(void);
-extern int maint_read_write(void __user *buf, int count);
-
-/*
- *  helper functions
- */
-static char *getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "1.0";
-
-	return rev;
-}
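
getrev() strips the CVS keyword framing, turning "$Revision: 1.32.6.10 $" into "1.32.6.10"; it writes a terminating zero into the buffer, which is why the callers first copy main_revision into tmprev. A stand-alone copy of the same logic for illustration:

#include <stdio.h>
#include <string.h>

static char *getrev(char *revision)         /* same parsing, user space copy */
{
	char *rev, *p;

	if ((p = strchr(revision, ':'))) {
		rev = p + 2;                /* skip ": " after the keyword   */
		p = strchr(rev, '$');
		*--p = 0;                   /* cut the trailing " $" off     */
	} else
		rev = "1.0";
	return rev;
}

int main(void)
{
	char tmprev[50];

	strcpy(tmprev, "$Revision: 1.32.6.10 $");
	printf("%s\n", getrev(tmprev));     /* prints: 1.32.6.10 */
	return 0;
}
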
-
-/*
- * kernel/user space copy functions
- */
-int diva_os_copy_to_user(void *os_handle, void __user *dst, const void *src,
-			 int length)
-{
-	return (copy_to_user(dst, src, length));
-}
-int diva_os_copy_from_user(void *os_handle, void *dst, const void __user *src,
-			   int length)
-{
-	return (copy_from_user(dst, src, length));
-}
-
-/*
- * get time
- */
-void diva_os_get_time(dword *sec, dword *usec)
-{
-	struct timespec64 time;
-
-	ktime_get_ts64(&time);
-
-	*sec = (dword) time.tv_sec;
-	*usec = (dword) (time.tv_nsec / NSEC_PER_USEC);
-}
-
-/*
- * device node operations
- */
-static __poll_t maint_poll(struct file *file, poll_table *wait)
-{
-	__poll_t mask = 0;
-
-	poll_wait(file, &msgwaitq, wait);
-	mask = EPOLLOUT | EPOLLWRNORM;
-	if (file->private_data || diva_dbg_q_length()) {
-		mask |= EPOLLIN | EPOLLRDNORM;
-	}
-	return (mask);
-}
-
-static int maint_open(struct inode *ino, struct file *filep)
-{
-	int ret;
-
-	mutex_lock(&maint_mutex);
-	/* only one open is allowed, so we test
-	   it atomically */
-	if (test_and_set_bit(0, &opened))
-		ret = -EBUSY;
-	else {
-		filep->private_data = NULL;
-		ret = nonseekable_open(ino, filep);
-	}
-	mutex_unlock(&maint_mutex);
-	return ret;
-}
-
-static int maint_close(struct inode *ino, struct file *filep)
-{
-	if (filep->private_data) {
-		diva_os_free(0, filep->private_data);
-		filep->private_data = NULL;
-	}
-
-	/* clear 'used' flag */
-	clear_bit(0, &opened);
-
-	return (0);
-}
-
-static ssize_t divas_maint_write(struct file *file, const char __user *buf,
-				 size_t count, loff_t *ppos)
-{
-	return (maint_read_write((char __user *) buf, (int) count));
-}
-
-static ssize_t divas_maint_read(struct file *file, char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	return (maint_read_write(buf, (int) count));
-}
-
-static const struct file_operations divas_maint_fops = {
-	.owner   = THIS_MODULE,
-	.llseek  = no_llseek,
-	.read    = divas_maint_read,
-	.write   = divas_maint_write,
-	.poll    = maint_poll,
-	.open    = maint_open,
-	.release = maint_close
-};
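
From user space the maint interface is an ordinary character device: poll() reports POLLIN when trace data is queued and read() returns the raw trace records. A minimal sketch, assuming the device node has been created as /dev/DivasMAINT for the dynamically assigned major:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	int fd = open("/dev/DivasMAINT", O_RDONLY);   /* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, (size_t) n, stdout);   /* raw trace records */
	}
	close(fd);
	return 0;
}
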
-
-static void divas_maint_unregister_chrdev(void)
-{
-	unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_maint_register_chrdev(void)
-{
-	if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0)
-	{
-		printk(KERN_ERR "%s: failed to create /dev entry.\n",
-		       DRIVERLNAME);
-		return (0);
-	}
-
-	return (1);
-}
-
-/*
- * wake up reader
- */
-void diva_maint_wakeup_read(void)
-{
-	wake_up_interruptible(&msgwaitq);
-}
-
-/*
- *  Driver Load
- */
-static int __init maint_init(void)
-{
-	char tmprev[50];
-	int ret = 0;
-	void *buffer = NULL;
-
-	init_waitqueue_head(&msgwaitq);
-
-	printk(KERN_INFO "%s\n", DRIVERNAME);
-	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_MNT);
-	strcpy(tmprev, main_revision);
-	printk("%s  Build: %s \n", getrev(tmprev), DIVA_BUILD);
-
-	if (!divas_maint_register_chrdev()) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!(mntfunc_init(&buffer_length, &buffer, diva_dbg_mem))) {
-		printk(KERN_ERR "%s: failed to connect to DIDD.\n",
-		       DRIVERLNAME);
-		divas_maint_unregister_chrdev();
-		ret = -EIO;
-		goto out;
-	}
-
-	printk(KERN_INFO "%s: trace buffer = %p - %d kBytes, %s (Major: %d)\n",
-	       DRIVERLNAME, buffer, (buffer_length / 1024),
-	       (diva_dbg_mem == 0) ? "internal" : "external", major);
-
-out:
-	return (ret);
-}
-
-/*
-**  Driver Unload
-*/
-static void __exit maint_exit(void)
-{
-	divas_maint_unregister_chrdev();
-	mntfunc_finit();
-
-	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(maint_init);
-module_exit(maint_exit);
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
deleted file mode 100644
index 4be5f88..0000000
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* $Id: divasfunc.c,v 1.23.4.2 2004/08/28 20:03:53 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "di.h"
-#include "io.h"
-#include "divasync.h"
-#include "diva.h"
-#include "xdi_vers.h"
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-static int debugmask;
-
-extern void DIVA_DIDD_Read(void *, int);
-
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-
-extern char *DRIVERRELEASE_DIVAS;
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-
-/* --------------------------------------------------------------------------
-   MAINT driver connector section
-   -------------------------------------------------------------------------- */
-static void no_printf(unsigned char *x, ...)
-{
-	/* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- * get the adapters serial number
- */
-void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf)
-{
-	int contr = 0;
-
-	if ((contr = ((IoAdapter->serialNo & 0xff000000) >> 24))) {
-		sprintf(buf, "%d-%d",
-			IoAdapter->serialNo & 0x00ffffff, contr + 1);
-	} else {
-		sprintf(buf, "%d", IoAdapter->serialNo);
-	}
-}
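
The top byte of serialNo carries a controller index on multi-controller adapters; when it is non-zero the serial is printed as "serial-controller". A stand-alone illustration with made-up serial numbers:

#include <stdio.h>

static void format_vserial(unsigned int serialNo, char *buf)
{
	int contr = (int) ((serialNo & 0xff000000) >> 24);   /* controller index */

	if (contr)
		sprintf(buf, "%d-%d", (int) (serialNo & 0x00ffffff), contr + 1);
	else
		sprintf(buf, "%d", (int) serialNo);
}

int main(void)
{
	char buf[32];

	format_vserial(1234567, buf);                 /* single controller */
	printf("%s\n", buf);                          /* -> 1234567        */
	format_vserial(0x01000000 | 1234567, buf);    /* second controller */
	printf("%s\n", buf);                          /* -> 1234567-2      */
	return 0;
}
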
-
-/*
- * register a new adapter
- */
-void diva_xdi_didd_register_adapter(int card)
-{
-	DESCRIPTOR d;
-	IDI_SYNC_REQ req;
-
-	if (card && ((card - 1) < MAX_ADAPTER) &&
-	    IoAdapters[card - 1] && Requests[card - 1]) {
-		d.type = IoAdapters[card - 1]->Properties.DescType;
-		d.request = Requests[card - 1];
-		d.channels = IoAdapters[card - 1]->Properties.Channels;
-		d.features = IoAdapters[card - 1]->Properties.Features;
-		DBG_TRC(("DIDD register A(%d) channels=%d", card,
-			 d.channels))
-			/* workaround for different Name in structure */
-			strlcpy(IoAdapters[card - 1]->Name,
-				IoAdapters[card - 1]->Properties.Name,
-				sizeof(IoAdapters[card - 1]->Name));
-		req.didd_remove_adapter.e.Req = 0;
-		req.didd_add_adapter.e.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
-		req.didd_add_adapter.info.descriptor = (void *) &d;
-		DAdapter.request((ENTITY *)&req);
-		if (req.didd_add_adapter.e.Rc != 0xff) {
-			DBG_ERR(("DIDD register A(%d) failed !", card))
-				}
-		IoAdapters[card - 1]->os_trap_nfy_Fnc = NULL;
-	}
-}
-
-/*
- * remove an adapter
- */
-void diva_xdi_didd_remove_adapter(int card)
-{
-	IDI_SYNC_REQ req;
-	ADAPTER *a = &IoAdapters[card - 1]->a;
-
-	IoAdapters[card - 1]->os_trap_nfy_Fnc = NULL;
-	DBG_TRC(("DIDD de-register A(%d)", card))
-		req.didd_remove_adapter.e.Req = 0;
-	req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
-	req.didd_remove_adapter.info.p_request =
-		(IDI_CALL) Requests[card - 1];
-	DAdapter.request((ENTITY *)&req);
-	memset(&(a->IdTable), 0x00, 256);
-}
-
-/*
- * start debug
- */
-static void start_dbg(void)
-{
-	DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT);
-	DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])",
-		 DIVA_BUILD, diva_xdi_common_code_build))
-		}
-
-/*
- * stop debug
- */
-static void stop_dbg(void)
-{
-	DbgDeregister();
-	memset(&MAdapter, 0, sizeof(MAdapter));
-	dprintf = no_printf;
-}
-
-/*
- * didd callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
-			   int removal)
-{
-	if (adapter->type == IDI_DADAPTER) {
-		DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
-		return (NULL);
-	}
-
-	if (adapter->type == IDI_DIMAINT) {
-		if (removal) {
-			stop_dbg();
-		} else {
-			memcpy(&MAdapter, adapter, sizeof(MAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			start_dbg();
-		}
-	}
-	return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
-	int x = 0;
-	int dadapter = 0;
-	IDI_SYNC_REQ req;
-	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
-	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
-			dadapter = 1;
-			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			req.didd_notify.e.Req = 0;
-			req.didd_notify.e.Rc =
-				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
-			req.didd_notify.info.callback = (void *)didd_callback;
-			req.didd_notify.info.context = NULL;
-			DAdapter.request((ENTITY *)&req);
-			if (req.didd_notify.e.Rc != 0xff) {
-				stop_dbg();
-				return (0);
-			}
-			notify_handle = req.didd_notify.info.handle;
-		} else if (DIDD_Table[x].type == IDI_DIMAINT) {	/* MAINT found */
-			memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			start_dbg();
-		}
-	}
-
-	if (!dadapter) {
-		stop_dbg();
-	}
-
-	return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void disconnect_didd(void)
-{
-	IDI_SYNC_REQ req;
-
-	stop_dbg();
-
-	req.didd_notify.e.Req = 0;
-	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
-	req.didd_notify.info.handle = notify_handle;
-	DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init divasfunc_init(int dbgmask)
-{
-	char *version;
-
-	debugmask = dbgmask;
-
-	if (!connect_didd()) {
-		DBG_ERR(("divasfunc: failed to connect to DIDD."))
-			return (0);
-	}
-
-	version = diva_xdi_common_code_build;
-
-	divasa_xdi_driver_entry();
-
-	return (1);
-}
-
-/*
- * exit
- */
-void divasfunc_exit(void)
-{
-	divasa_xdi_driver_unload();
-	disconnect_didd();
-}
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
deleted file mode 100644
index e7081e0..0000000
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/* $Id: divasi.c,v 1.25.6.2 2005/01/31 12:22:20 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-
-static char *main_revision = "$Revision: 1.25.6.2 $";
-
-static int major;
-
-MODULE_DESCRIPTION("User IDI Interface for Eicon ISDN cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("DIVA card driver");
-MODULE_LICENSE("GPL");
-
-typedef struct _diva_um_idi_os_context {
-	wait_queue_head_t read_wait;
-	wait_queue_head_t close_wait;
-	struct timer_list diva_timer_id;
-	int aborted;
-	int adapter_nr;
-} diva_um_idi_os_context_t;
-
-static char *DRIVERNAME = "Eicon DIVA - User IDI (http://www.melware.net)";
-static char *DRIVERLNAME = "diva_idi";
-static char *DEVNAME = "DivasIDI";
-char *DRIVERRELEASE_IDI = "2.0";
-
-extern int idifunc_init(void);
-extern void idifunc_finit(void);
-
-/*
- *  helper functions
- */
-static char *getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "1.0";
-	return rev;
-}
-
-/*
- *  LOCALS
- */
-static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
-			   loff_t *offset);
-static ssize_t um_idi_write(struct file *file, const char __user *buf,
-			    size_t count, loff_t *offset);
-static __poll_t um_idi_poll(struct file *file, poll_table *wait);
-static int um_idi_open(struct inode *inode, struct file *file);
-static int um_idi_release(struct inode *inode, struct file *file);
-static int remove_entity(void *entity);
-static void diva_um_timer_function(struct timer_list *t);
-
-/*
- * proc entry
- */
-extern struct proc_dir_entry *proc_net_eicon;
-static struct proc_dir_entry *um_idi_proc_entry = NULL;
-
-static int um_idi_proc_show(struct seq_file *m, void *v)
-{
-	char tmprev[32];
-
-	seq_printf(m, "%s\n", DRIVERNAME);
-	seq_printf(m, "name     : %s\n", DRIVERLNAME);
-	seq_printf(m, "release  : %s\n", DRIVERRELEASE_IDI);
-	strcpy(tmprev, main_revision);
-	seq_printf(m, "revision : %s\n", getrev(tmprev));
-	seq_printf(m, "build    : %s\n", DIVA_BUILD);
-	seq_printf(m, "major    : %d\n", major);
-
-	return 0;
-}
-
-static int __init create_um_idi_proc(void)
-{
-	um_idi_proc_entry = proc_create_single(DRIVERLNAME, S_IRUGO,
-			proc_net_eicon, um_idi_proc_show);
-	if (!um_idi_proc_entry)
-		return (0);
-	return (1);
-}
-
-static void remove_um_idi_proc(void)
-{
-	if (um_idi_proc_entry) {
-		remove_proc_entry(DRIVERLNAME, proc_net_eicon);
-		um_idi_proc_entry = NULL;
-	}
-}
-
-static const struct file_operations divas_idi_fops = {
-	.owner   = THIS_MODULE,
-	.llseek  = no_llseek,
-	.read    = um_idi_read,
-	.write   = um_idi_write,
-	.poll    = um_idi_poll,
-	.open    = um_idi_open,
-	.release = um_idi_release
-};
-
-static void divas_idi_unregister_chrdev(void)
-{
-	unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_idi_register_chrdev(void)
-{
-	if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
-	{
-		printk(KERN_ERR "%s: failed to create /dev entry.\n",
-		       DRIVERLNAME);
-		return (0);
-	}
-
-	return (1);
-}
-
-/*
-** Driver Load
-*/
-static int __init divasi_init(void)
-{
-	char tmprev[50];
-	int ret = 0;
-
-	printk(KERN_INFO "%s\n", DRIVERNAME);
-	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_IDI);
-	strcpy(tmprev, main_revision);
-	printk("%s  Build: %s\n", getrev(tmprev), DIVA_BUILD);
-
-	if (!divas_idi_register_chrdev()) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!create_um_idi_proc()) {
-		divas_idi_unregister_chrdev();
-		printk(KERN_ERR "%s: failed to create proc entry.\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!(idifunc_init())) {
-		remove_um_idi_proc();
-		divas_idi_unregister_chrdev();
-		printk(KERN_ERR "%s: failed to connect to DIDD.\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-		goto out;
-	}
-	printk(KERN_INFO "%s: started with major %d\n", DRIVERLNAME, major);
-
-out:
-	return (ret);
-}
-
-
-/*
-** Driver Unload
-*/
-static void __exit divasi_exit(void)
-{
-	idifunc_finit();
-	remove_um_idi_proc();
-	divas_idi_unregister_chrdev();
-
-	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divasi_init);
-module_exit(divasi_exit);
-
-
-/*
- *  FILE OPERATIONS
- */
-
-static int
-divas_um_idi_copy_to_user(void *os_handle, void *dst, const void *src,
-			  int length)
-{
-	memcpy(dst, src, length);
-	return (length);
-}
-
-static ssize_t
-um_idi_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
-	diva_um_idi_os_context_t *p_os;
-	int ret = -EINVAL;
-	void *data;
-
-	if (!file->private_data) {
-		return (-ENODEV);
-	}
-
-	if (!
-	    (p_os =
-	     (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->
-								    private_data)))
-	{
-		return (-ENODEV);
-	}
-	if (p_os->aborted) {
-		return (-ENODEV);
-	}
-
-	if (!(data = diva_os_malloc(0, count))) {
-		return (-ENOMEM);
-	}
-
-	ret = diva_um_idi_read(file->private_data,
-			       file, data, count,
-			       divas_um_idi_copy_to_user);
-	switch (ret) {
-	case 0:		/* no message available */
-		ret = (-EAGAIN);
-		break;
-	case (-1):		/* adapter was removed */
-		ret = (-ENODEV);
-		break;
-	case (-2):		/* message_length > length of user buffer */
-		ret = (-EFAULT);
-		break;
-	}
-
-	if (ret > 0) {
-		if (copy_to_user(buf, data, ret)) {
-			ret = (-EFAULT);
-		}
-	}
-
-	diva_os_free(0, data);
-	DBG_TRC(("read: ret %d", ret));
-	return (ret);
-}
-
-
-static int
-divas_um_idi_copy_from_user(void *os_handle, void *dst, const void *src,
-			    int length)
-{
-	memcpy(dst, src, length);
-	return (length);
-}
-
-static int um_idi_open_adapter(struct file *file, int adapter_nr)
-{
-	diva_um_idi_os_context_t *p_os;
-	void *e =
-		divas_um_idi_create_entity((dword) adapter_nr, (void *) file);
-
-	if (!(file->private_data = e)) {
-		return (0);
-	}
-	p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
-	init_waitqueue_head(&p_os->read_wait);
-	init_waitqueue_head(&p_os->close_wait);
-	timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
-	p_os->aborted = 0;
-	p_os->adapter_nr = adapter_nr;
-	return (1);
-}
-
-static ssize_t
-um_idi_write(struct file *file, const char __user *buf, size_t count,
-	     loff_t *offset)
-{
-	diva_um_idi_os_context_t *p_os;
-	int ret = -EINVAL;
-	void *data;
-	int adapter_nr = 0;
-
-	if (!file->private_data) {
-		/* the first write() selects the adapter_nr */
-		if (count == sizeof(int)) {
-			if (copy_from_user
-			    ((void *) &adapter_nr, buf,
-			     count)) return (-EFAULT);
-			if (!(um_idi_open_adapter(file, adapter_nr)))
-				return (-ENODEV);
-			return (count);
-		} else
-			return (-ENODEV);
-	}
-
-	if (!(p_os =
-	      (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->
-								     private_data)))
-	{
-		return (-ENODEV);
-	}
-	if (p_os->aborted) {
-		return (-ENODEV);
-	}
-
-	if (!(data = diva_os_malloc(0, count))) {
-		return (-ENOMEM);
-	}
-
-	if (copy_from_user(data, buf, count)) {
-		ret = -EFAULT;
-	} else {
-		ret = diva_um_idi_write(file->private_data,
-					file, data, count,
-					divas_um_idi_copy_from_user);
-		switch (ret) {
-		case 0:	/* no space available */
-			ret = (-EAGAIN);
-			break;
-		case (-1):	/* adapter was removed */
-			ret = (-ENODEV);
-			break;
-		case (-2):	/* length of user buffer > max message_length */
-			ret = (-EFAULT);
-			break;
-		}
-	}
-	diva_os_free(0, data);
-	DBG_TRC(("write: ret %d", ret));
-	return (ret);
-}
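
Seen from user space, the protocol implemented above is: the very first write() must be exactly sizeof(int) bytes and selects the adapter, later write() calls send IDI requests, and read() returns one queued indication (EAGAIN when nothing is pending). A minimal sketch, assuming a /dev/DivasIDI node for the dynamically assigned major and leaving the actual IDI message encoding out:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int adapter_nr = 1;                            /* hypothetical adapter */
	unsigned char msg[2048];
	struct pollfd pfd;
	int fd = open("/dev/DivasIDI", O_RDWR);        /* assumed node name    */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the first write selects the adapter and creates the entity */
	if (write(fd, &adapter_nr, sizeof(adapter_nr)) != sizeof(adapter_nr)) {
		perror("select adapter");
		return 1;
	}
	/* ... write() IDI request messages here ... */

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, msg, sizeof(msg));   /* one indication */
		if (n < 0 && errno != EAGAIN)
			perror("read");
		else if (n > 0)
			printf("got %zd byte indication\n", n);
	}
	close(fd);
	return 0;
}
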
-
-static __poll_t um_idi_poll(struct file *file, poll_table *wait)
-{
-	diva_um_idi_os_context_t *p_os;
-
-	if (!file->private_data) {
-		return (EPOLLERR);
-	}
-
-	if ((!(p_os =
-	       (diva_um_idi_os_context_t *)
-	       diva_um_id_get_os_context(file->private_data)))
-	    || p_os->aborted) {
-		return (EPOLLERR);
-	}
-
-	poll_wait(file, &p_os->read_wait, wait);
-
-	if (p_os->aborted) {
-		return (EPOLLERR);
-	}
-
-	switch (diva_user_mode_idi_ind_ready(file->private_data, file)) {
-	case (-1):
-		return (EPOLLERR);
-
-	case 0:
-		return (0);
-	}
-
-	return (EPOLLIN | EPOLLRDNORM);
-}
-
-static int um_idi_open(struct inode *inode, struct file *file)
-{
-	return (0);
-}
-
-
-static int um_idi_release(struct inode *inode, struct file *file)
-{
-	diva_um_idi_os_context_t *p_os;
-	unsigned int adapter_nr;
-	int ret = 0;
-
-	if (!(file->private_data)) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	if (!(p_os =
-	      (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->private_data))) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	adapter_nr = p_os->adapter_nr;
-
-	if ((ret = remove_entity(file->private_data))) {
-		goto out;
-	}
-
-	if (divas_um_idi_delete_entity
-	    ((int) adapter_nr, file->private_data)) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-out:
-	return (ret);
-}
-
-int diva_os_get_context_size(void)
-{
-	return (sizeof(diva_um_idi_os_context_t));
-}
-
-void diva_os_wakeup_read(void *os_context)
-{
-	diva_um_idi_os_context_t *p_os =
-		(diva_um_idi_os_context_t *) os_context;
-	wake_up_interruptible(&p_os->read_wait);
-}
-
-void diva_os_wakeup_close(void *os_context)
-{
-	diva_um_idi_os_context_t *p_os =
-		(diva_um_idi_os_context_t *) os_context;
-	wake_up_interruptible(&p_os->close_wait);
-}
-
-static
-void diva_um_timer_function(struct timer_list *t)
-{
-	diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
-
-	p_os->aborted = 1;
-	wake_up_interruptible(&p_os->read_wait);
-	wake_up_interruptible(&p_os->close_wait);
-	DBG_ERR(("entity removal watchdog"))
-		}
-
-/*
-**  If the application exits without removing the entity, this function
-**  removes the entity and blocks until removal is complete
-*/
-static int remove_entity(void *entity)
-{
-	struct task_struct *curtask = current;
-	diva_um_idi_os_context_t *p_os;
-
-	diva_um_idi_stop_wdog(entity);
-
-	if (!entity) {
-		DBG_FTL(("Zero entity on remove"))
-			return (0);
-	}
-
-	if (!(p_os =
-	      (diva_um_idi_os_context_t *)
-	      diva_um_id_get_os_context(entity))) {
-		DBG_FTL(("Zero entity os context on remove"))
-			return (0);
-	}
-
-	if (!divas_um_idi_entity_assigned(entity) || p_os->aborted) {
-		/*
-		  Entity is not assigned, so it can be removed
-		*/
-		return (0);
-	}
-
-	DBG_TRC(("E(%08x) check remove", entity))
-
-		/*
-		  If the adapter does not answer the remove request
-		  within 10 seconds, the adapter is considered dead
-		*/
-		diva_um_idi_start_wdog(entity);
-
-	{
-		DECLARE_WAITQUEUE(wait, curtask);
-
-		add_wait_queue(&p_os->close_wait, &wait);
-		for (;;) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (!divas_um_idi_entity_start_remove(entity)
-			    || p_os->aborted) {
-				break;
-			}
-			schedule();
-		}
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&p_os->close_wait, &wait);
-	}
-
-	DBG_TRC(("E(%08x) start remove", entity))
-	{
-		DECLARE_WAITQUEUE(wait, curtask);
-
-		add_wait_queue(&p_os->close_wait, &wait);
-		for (;;) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (!divas_um_idi_entity_assigned(entity)
-			    || p_os->aborted) {
-				break;
-			}
-			schedule();
-		}
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&p_os->close_wait, &wait);
-	}
-
-	DBG_TRC(("E(%08x) remove complete, aborted:%d", entity,
-		 p_os->aborted))
-
-		diva_um_idi_stop_wdog(entity);
-
-	p_os->aborted = 0;
-
-	return (0);
-}
-
-/*
- * timer watchdog
- */
-void diva_um_idi_start_wdog(void *entity)
-{
-	diva_um_idi_os_context_t *p_os;
-
-	if (entity &&
-	    ((p_os =
-	      (diva_um_idi_os_context_t *)
-	      diva_um_id_get_os_context(entity)))) {
-		mod_timer(&p_os->diva_timer_id, jiffies + 10 * HZ);
-	}
-}
-
-void diva_um_idi_stop_wdog(void *entity)
-{
-	diva_um_idi_os_context_t *p_os;
-
-	if (entity &&
-	    ((p_os =
-	      (diva_um_idi_os_context_t *)
-	      diva_um_id_get_os_context(entity)))) {
-		del_timer(&p_os->diva_timer_id);
-	}
-}
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
deleted file mode 100644
index b6a3950..0000000
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ /dev/null
@@ -1,848 +0,0 @@
-/* $Id: divasmain.c,v 1.55.4.6 2005/02/09 19:28:20 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/poll.h>
-#include <linux/kmod.h>
-
-#include "platform.h"
-#undef ID_MASK
-#undef N_DATA
-#include "pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "diva.h"
-#include "di.h"
-#include "io.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "xdi_vers.h"
-#include "diva_dma.h"
-#include "diva_pci.h"
-
-static char *main_revision = "$Revision: 1.55.4.6 $";
-
-static int major;
-
-static int dbgmask;
-
-MODULE_DESCRIPTION("Kernel driver for Eicon DIVA Server cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_LICENSE("GPL");
-
-module_param(dbgmask, int, 0);
-MODULE_PARM_DESC(dbgmask, "initial debug mask");
-
-static char *DRIVERNAME =
-	"Eicon DIVA Server driver (http://www.melware.net)";
-static char *DRIVERLNAME = "divas";
-static char *DEVNAME = "Divas";
-char *DRIVERRELEASE_DIVAS = "2.0";
-
-extern irqreturn_t diva_os_irq_wrapper(int irq, void *context);
-extern int create_divas_proc(void);
-extern void remove_divas_proc(void);
-extern void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf);
-extern int divasfunc_init(int dbgmask);
-extern void divasfunc_exit(void);
-
-typedef struct _diva_os_thread_dpc {
-	struct tasklet_struct divas_task;
-	diva_os_soft_isr_t *psoft_isr;
-} diva_os_thread_dpc_t;
-
-/* --------------------------------------------------------------------------
-   PCI driver interface section
-   -------------------------------------------------------------------------- */
-/*
-  vendor, device	Vendor and device ID to match (or PCI_ANY_ID)
-  subvendor,	Subsystem vendor and device ID to match (or PCI_ANY_ID)
-  subdevice
-  class,		Device class to match. The class_mask tells which bits
-  class_mask	of the class are honored during the comparison.
-  driver_data	Data private to the driver.
-*/
-
-#if !defined(PCI_DEVICE_ID_EICON_MAESTRAP_2)
-#define PCI_DEVICE_ID_EICON_MAESTRAP_2       0xE015
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_4BRI_VOIP)
-#define PCI_DEVICE_ID_EICON_4BRI_VOIP        0xE016
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_4BRI_2_VOIP)
-#define PCI_DEVICE_ID_EICON_4BRI_2_VOIP      0xE017
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_BRI2M_2)
-#define PCI_DEVICE_ID_EICON_BRI2M_2          0xE018
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP)
-#define PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP  0xE019
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_2F)
-#define PCI_DEVICE_ID_EICON_2F               0xE01A
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_BRI2M_2_VOIP)
-#define PCI_DEVICE_ID_EICON_BRI2M_2_VOIP     0xE01B
-#endif
-
-/*
-  This table should be sorted by PCI device ID
-*/
-static const struct pci_device_id divas_pci_tbl[] = {
-	/* Diva Server BRI-2M PCI 0xE010 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
-	  CARDTYPE_MAESTRA_PCI },
-	/* Diva Server 4BRI-8M PCI 0xE012 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ),
-	  CARDTYPE_DIVASRV_Q_8M_PCI },
-	/* Diva Server 4BRI-8M 2.0 PCI 0xE013 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ_U),
-	  CARDTYPE_DIVASRV_Q_8M_V2_PCI },
-	/* Diva Server PRI-30M PCI 0xE014 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP),
-	  CARDTYPE_DIVASRV_P_30M_PCI },
-	/* Diva Server PRI 2.0 adapter 0xE015 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2),
-	  CARDTYPE_DIVASRV_P_30M_V2_PCI },
-	/* Diva Server Voice 4BRI-8M PCI 0xE016 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_VOIP),
-	  CARDTYPE_DIVASRV_VOICE_Q_8M_PCI },
-	/* Diva Server Voice 4BRI-8M 2.0 PCI 0xE017 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_2_VOIP),
-	  CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI },
-	/* Diva Server BRI-2M 2.0 PCI 0xE018 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2),
-	  CARDTYPE_DIVASRV_B_2M_V2_PCI },
-	/* Diva Server Voice PRI 2.0 PCI 0xE019 */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP),
-	  CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI },
-	/* Diva Server 2FX 0xE01A */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_2F),
-	  CARDTYPE_DIVASRV_B_2F_PCI },
-	/* Diva Server Voice BRI-2M 2.0 PCI 0xE01B */
-	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2_VOIP),
-	  CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI },
-	{ 0, }			/* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, divas_pci_tbl);
-
-static int divas_init_one(struct pci_dev *pdev,
-			  const struct pci_device_id *ent);
-static void divas_remove_one(struct pci_dev *pdev);
-
-static struct pci_driver diva_pci_driver = {
-	.name     = "divas",
-	.probe    = divas_init_one,
-	.remove   = divas_remove_one,
-	.id_table = divas_pci_tbl,
-};
-
-/*********************************************************
- ** little helper functions
- *********************************************************/
-static char *getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "1.0";
-	return rev;
-}
-
-void diva_log_info(unsigned char *format, ...)
-{
-	va_list args;
-	unsigned char line[160];
-
-	va_start(args, format);
-	vsnprintf(line, sizeof(line), format, args);
-	va_end(args);
-
-	printk(KERN_INFO "%s: %s\n", DRIVERLNAME, line);
-}
-
-void divas_get_version(char *p)
-{
-	char tmprev[32];
-
-	strcpy(tmprev, main_revision);
-	sprintf(p, "%s: %s(%s) %s(%s) major=%d\n", DRIVERLNAME, DRIVERRELEASE_DIVAS,
-		getrev(tmprev), diva_xdi_common_code_build, DIVA_BUILD, major);
-}
-
-/* --------------------------------------------------------------------------
-   PCI Bus services
-   -------------------------------------------------------------------------- */
-byte diva_os_get_pci_bus(void *pci_dev_handle)
-{
-	struct pci_dev *pdev = (struct pci_dev *) pci_dev_handle;
-	return ((byte) pdev->bus->number);
-}
-
-byte diva_os_get_pci_func(void *pci_dev_handle)
-{
-	struct pci_dev *pdev = (struct pci_dev *) pci_dev_handle;
-	return ((byte) pdev->devfn);
-}
-
-unsigned long divasa_get_pci_irq(unsigned char bus, unsigned char func,
-				 void *pci_dev_handle)
-{
-	unsigned char irq = 0;
-	struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
-	irq = dev->irq;
-
-	return ((unsigned long) irq);
-}
-
-unsigned long divasa_get_pci_bar(unsigned char bus, unsigned char func,
-				 int bar, void *pci_dev_handle)
-{
-	unsigned long ret = 0;
-	struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
-	if (bar < 6) {
-		ret = dev->resource[bar].start;
-	}
-
-	DBG_TRC(("GOT BAR[%d]=%08x", bar, ret));
-
-	{
-		unsigned long type = (ret & 0x00000001);
-		if (type & PCI_BASE_ADDRESS_SPACE_IO) {
-			DBG_TRC(("  I/O"));
-			ret &= PCI_BASE_ADDRESS_IO_MASK;
-		} else {
-			DBG_TRC(("  memory"));
-			ret &= PCI_BASE_ADDRESS_MEM_MASK;
-		}
-		DBG_TRC(("  final=%08x", ret));
-	}
-
-	return (ret);
-}
-
-void PCIwrite(byte bus, byte func, int offset, void *data, int length,
-	      void *pci_dev_handle)
-{
-	struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
-	switch (length) {
-	case 1:		/* byte */
-		pci_write_config_byte(dev, offset,
-				      *(unsigned char *) data);
-		break;
-	case 2:		/* word */
-		pci_write_config_word(dev, offset,
-				      *(unsigned short *) data);
-		break;
-	case 4:		/* dword */
-		pci_write_config_dword(dev, offset,
-				       *(unsigned int *) data);
-		break;
-
-	default:		/* buffer */
-		if (!(length % 4) && !(length & 0x03)) {	/* Copy as dword */
-			dword *p = (dword *) data;
-			length /= 4;
-
-			while (length--) {
-				pci_write_config_dword(dev, offset,
-						       *(unsigned int *)
-						       p++);
-			}
-		} else {	/* copy as byte stream */
-			byte *p = (byte *) data;
-
-			while (length--) {
-				pci_write_config_byte(dev, offset,
-						      *(unsigned char *)
-						      p++);
-			}
-		}
-	}
-}
-
-void PCIread(byte bus, byte func, int offset, void *data, int length,
-	     void *pci_dev_handle)
-{
-	struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
-	switch (length) {
-	case 1:		/* byte */
-		pci_read_config_byte(dev, offset, (unsigned char *) data);
-		break;
-	case 2:		/* word */
-		pci_read_config_word(dev, offset, (unsigned short *) data);
-		break;
-	case 4:		/* dword */
-		pci_read_config_dword(dev, offset, (unsigned int *) data);
-		break;
-
-	default:		/* buffer */
-		if (!(length % 4) && !(length & 0x03)) {	/* Copy as dword */
-			dword *p = (dword *) data;
-			length /= 4;
-
-			while (length--) {
-				pci_read_config_dword(dev, offset,
-						      (unsigned int *)
-						      p++);
-			}
-		} else {	/* copy as byte stream */
-			byte *p = (byte *) data;
-
-			while (length--) {
-				pci_read_config_byte(dev, offset,
-						     (unsigned char *)
-						     p++);
-			}
-		}
-	}
-}
-
-/*
-  Init the map with DMA pages. It is not a problem if some allocations
-  fail - the channels that do not get a DMA page will fall back to the
-  standard PIO interface.
-*/
-static void *diva_pci_alloc_consistent(struct pci_dev *hwdev,
-				       size_t size,
-				       dma_addr_t *dma_handle,
-				       void **addr_handle)
-{
-	void *addr = pci_alloc_consistent(hwdev, size, dma_handle);
-
-	*addr_handle = addr;
-
-	return (addr);
-}
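For context, pci_alloc_consistent() is a legacy wrapper around the generic DMA API and was later removed from mainline kernels; the same allocation is written today with dma_alloc_coherent(). A minimal, hedged sketch of the helper above in those terms (the function name is made up for illustration and <linux/dma-mapping.h> is required):

static void *diva_dma_alloc_coherent(struct pci_dev *hwdev, size_t size,
				     dma_addr_t *dma_handle,
				     void **addr_handle)
{
	/* Returns NULL on failure, which matches the PIO fallback
	 * behaviour described in the comment above. */
	void *addr = dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					GFP_KERNEL);

	*addr_handle = addr;

	return addr;
}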
-
-void diva_init_dma_map(void *hdev,
-		       struct _diva_dma_map_entry **ppmap, int nentries)
-{
-	struct pci_dev *pdev = (struct pci_dev *) hdev;
-	struct _diva_dma_map_entry *pmap =
-		diva_alloc_dma_map(hdev, nentries);
-
-	if (pmap) {
-		int i;
-		dma_addr_t dma_handle;
-		void *cpu_addr;
-		void *addr_handle;
-
-		for (i = 0; i < nentries; i++) {
-			if (!(cpu_addr = diva_pci_alloc_consistent(pdev,
-								   PAGE_SIZE,
-								   &dma_handle,
-								   &addr_handle)))
-			{
-				break;
-			}
-			diva_init_dma_map_entry(pmap, i, cpu_addr,
-						(dword) dma_handle,
-						addr_handle);
-			DBG_TRC(("dma map alloc [%d]=(%08lx:%08x:%08lx)",
-				 i, (unsigned long) cpu_addr,
-				 (dword) dma_handle,
-				 (unsigned long) addr_handle))
-		}
-	}
-
-	*ppmap = pmap;
-}
-
-/*
-  Free everything contained in the map entries and the memory used by
-  the map. Should always be called after the adapter has been removed
-  from the DIDD array.
-*/
-void diva_free_dma_map(void *hdev, struct _diva_dma_map_entry *pmap)
-{
-	struct pci_dev *pdev = (struct pci_dev *) hdev;
-	int i;
-	dword phys_addr;
-	void *cpu_addr;
-	dma_addr_t dma_handle;
-	void *addr_handle;
-
-	for (i = 0; (pmap != NULL); i++) {
-		diva_get_dma_map_entry(pmap, i, &cpu_addr, &phys_addr);
-		if (!cpu_addr) {
-			break;
-		}
-		addr_handle = diva_get_entry_handle(pmap, i);
-		dma_handle = (dma_addr_t) phys_addr;
-		pci_free_consistent(pdev, PAGE_SIZE, addr_handle,
-				    dma_handle);
-		DBG_TRC(("dma map free [%d]=(%08lx:%08x:%08lx)", i,
-			 (unsigned long) cpu_addr, (dword) dma_handle,
-			 (unsigned long) addr_handle))
-	}
-
-	diva_free_dma_mapping(pmap);
-}
-
-
-/*********************************************************
- ** I/O port utilities
- *********************************************************/
-
-int
-diva_os_register_io_port(void *adapter, int on, unsigned long port,
-			 unsigned long length, const char *name, int id)
-{
-	if (on) {
-		if (!request_region(port, length, name)) {
-			DBG_ERR(("A: I/O: can't register port=%08x", port))
-			DBG_ERR(("A: I/O: can't register port=%08x", port))
-			return (-1);
-	} else {
-		release_region(port, length);
-	}
-	return (0);
-}
-
-void __iomem *divasa_remap_pci_bar(diva_os_xdi_adapter_t *a, int id, unsigned long bar, unsigned long area_length)
-{
-	void __iomem *ret = ioremap(bar, area_length);
-	DBG_TRC(("remap(%08x)->%p", bar, ret));
-	return (ret);
-}
-
-void divasa_unmap_pci_bar(void __iomem *bar)
-{
-	if (bar) {
-		iounmap(bar);
-	}
-}
-
-/*********************************************************
- ** I/O port access
- *********************************************************/
-inline byte inpp(void __iomem *addr)
-{
-	return (inb((unsigned long) addr));
-}
-
-inline word inppw(void __iomem *addr)
-{
-	return (inw((unsigned long) addr));
-}
-
-inline void inppw_buffer(void __iomem *addr, void *P, int length)
-{
-	insw((unsigned long) addr, (word *) P, length >> 1);
-}
-
-inline void outppw_buffer(void __iomem *addr, void *P, int length)
-{
-	outsw((unsigned long) addr, (word *) P, length >> 1);
-}
-
-inline void outppw(void __iomem *addr, word w)
-{
-	outw(w, (unsigned long) addr);
-}
-
-inline void outpp(void __iomem *addr, word p)
-{
-	outb(p, (unsigned long) addr);
-}
-
-/* --------------------------------------------------------------------------
-   IRQ request / remove
-   -------------------------------------------------------------------------- */
-int diva_os_register_irq(void *context, byte irq, const char *name)
-{
-	int result = request_irq(irq, diva_os_irq_wrapper,
-				 IRQF_SHARED, name, context);
-	return (result);
-}
-
-void diva_os_remove_irq(void *context, byte irq)
-{
-	free_irq(irq, context);
-}
-
-/* --------------------------------------------------------------------------
-   DPC framework implementation
-   -------------------------------------------------------------------------- */
-static void diva_os_dpc_proc(unsigned long context)
-{
-	diva_os_thread_dpc_t *psoft_isr = (diva_os_thread_dpc_t *) context;
-	diva_os_soft_isr_t *pisr = psoft_isr->psoft_isr;
-
-	(*(pisr->callback)) (pisr, pisr->callback_context);
-}
-
-int diva_os_initialize_soft_isr(diva_os_soft_isr_t *psoft_isr,
-				diva_os_soft_isr_callback_t callback,
-				void *callback_context)
-{
-	diva_os_thread_dpc_t *pdpc;
-
-	pdpc = (diva_os_thread_dpc_t *) diva_os_malloc(0, sizeof(*pdpc));
-	if (!(psoft_isr->object = pdpc)) {
-		return (-1);
-	}
-	memset(pdpc, 0x00, sizeof(*pdpc));
-	psoft_isr->callback = callback;
-	psoft_isr->callback_context = callback_context;
-	pdpc->psoft_isr = psoft_isr;
-	tasklet_init(&pdpc->divas_task, diva_os_dpc_proc, (unsigned long)pdpc);
-
-	return (0);
-}
-
-int diva_os_schedule_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
-	if (psoft_isr && psoft_isr->object) {
-		diva_os_thread_dpc_t *pdpc =
-			(diva_os_thread_dpc_t *) psoft_isr->object;
-
-		tasklet_schedule(&pdpc->divas_task);
-	}
-
-	return (1);
-}
-
-int diva_os_cancel_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
-	return (0);
-}
-
-void diva_os_remove_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
-	if (psoft_isr && psoft_isr->object) {
-		diva_os_thread_dpc_t *pdpc =
-			(diva_os_thread_dpc_t *) psoft_isr->object;
-		void *mem;
-
-		tasklet_kill(&pdpc->divas_task);
-		mem = psoft_isr->object;
-		psoft_isr->object = NULL;
-		diva_os_free(0, mem);
-	}
-}
-
-/*
- * kernel/user space copy functions
- */
-static int
-xdi_copy_to_user(void *os_handle, void __user *dst, const void *src, int length)
-{
-	if (copy_to_user(dst, src, length)) {
-		return (-EFAULT);
-	}
-	return (length);
-}
-
-static int
-xdi_copy_from_user(void *os_handle, void *dst, const void __user *src, int length)
-{
-	if (copy_from_user(dst, src, length)) {
-		return (-EFAULT);
-	}
-	return (length);
-}
-
-/*
- * device node operations
- */
-static int divas_open(struct inode *inode, struct file *file)
-{
-	return (0);
-}
-
-static int divas_release(struct inode *inode, struct file *file)
-{
-	if (file->private_data) {
-		diva_xdi_close_adapter(file->private_data, file);
-	}
-	return (0);
-}
-
-static ssize_t divas_write(struct file *file, const char __user *buf,
-			   size_t count, loff_t *ppos)
-{
-	diva_xdi_um_cfg_cmd_t msg;
-	int ret = -EINVAL;
-
-	if (!file->private_data) {
-		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count, &msg,
-							   xdi_copy_from_user);
-		if (!file->private_data)
-			return (-ENODEV);
-		ret = diva_xdi_write(file->private_data, file,
-				     buf, count, &msg, xdi_copy_from_user);
-	} else {
-		ret = diva_xdi_write(file->private_data, file,
-				     buf, count, NULL, xdi_copy_from_user);
-	}
-
-	switch (ret) {
-	case -1:		/* Message should be removed from rx mailbox first */
-		ret = -EBUSY;
-		break;
-	case -2:		/* invalid adapter was specified in this call */
-		ret = -ENOMEM;
-		break;
-	case -3:
-		ret = -ENXIO;
-		break;
-	}
-	DBG_TRC(("write: ret %d", ret));
-	return (ret);
-}
-
-static ssize_t divas_read(struct file *file, char __user *buf,
-			  size_t count, loff_t *ppos)
-{
-	diva_xdi_um_cfg_cmd_t msg;
-	int ret = -EINVAL;
-
-	if (!file->private_data) {
-		file->private_data = diva_xdi_open_adapter(file, buf,
-							   count, &msg,
-							   xdi_copy_from_user);
-	}
-	if (!file->private_data) {
-		return (-ENODEV);
-	}
-
-	ret = diva_xdi_read(file->private_data, file,
-			    buf, count, xdi_copy_to_user);
-	switch (ret) {
-	case -1:		/* RX mailbox is empty */
-		ret = -EAGAIN;
-		break;
-	case -2:		/* no memory, mailbox was cleared, last command failed */
-		ret = -ENOMEM;
-		break;
-	case -3:		/* can't copy to user, retry */
-		ret = -EFAULT;
-		break;
-	}
-	DBG_TRC(("read: ret %d", ret));
-	return (ret);
-}
-
-static __poll_t divas_poll(struct file *file, poll_table *wait)
-{
-	if (!file->private_data) {
-		return (EPOLLERR);
-	}
-	return (EPOLLIN | EPOLLRDNORM);
-}
-
-static const struct file_operations divas_fops = {
-	.owner   = THIS_MODULE,
-	.llseek  = no_llseek,
-	.read    = divas_read,
-	.write   = divas_write,
-	.poll    = divas_poll,
-	.open    = divas_open,
-	.release = divas_release
-};
-
-static void divas_unregister_chrdev(void)
-{
-	unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_register_chrdev(void)
-{
-	if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0)
-	{
-		printk(KERN_ERR "%s: failed to create /dev entry.\n",
-		       DRIVERLNAME);
-		return (0);
-	}
-
-	return (1);
-}
-
-/* --------------------------------------------------------------------------
-   PCI driver section
-   -------------------------------------------------------------------------- */
-static int divas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	void *pdiva = NULL;
-	u8 pci_latency;
-	u8 new_latency = 32;
-
-	DBG_TRC(("%s bus: %08x fn: %08x insertion.\n",
-		 CardProperties[ent->driver_data].Name,
-		 pdev->bus->number, pdev->devfn))
-		printk(KERN_INFO "%s: %s bus: %08x fn: %08x insertion.\n",
-		       DRIVERLNAME, CardProperties[ent->driver_data].Name,
-		       pdev->bus->number, pdev->devfn);
-
-	if (pci_enable_device(pdev)) {
-		DBG_TRC(("%s: %s bus: %08x fn: %08x device init failed.\n",
-			 DRIVERLNAME,
-			 CardProperties[ent->driver_data].Name,
-			 pdev->bus->number,
-			 pdev->devfn))
-			printk(KERN_ERR
-			       "%s: %s bus: %08x fn: %08x device init failed.\n",
-			       DRIVERLNAME,
-			       CardProperties[ent->driver_data].
-			       Name, pdev->bus->number,
-			       pdev->devfn);
-		return (-EIO);
-	}
-
-	pci_set_master(pdev);
-
-	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
-	if (!pci_latency) {
-		DBG_TRC(("%s: bus: %08x fn: %08x fix latency.\n",
-			 DRIVERLNAME, pdev->bus->number, pdev->devfn))
-			printk(KERN_INFO
-			       "%s: bus: %08x fn: %08x fix latency.\n",
-			       DRIVERLNAME, pdev->bus->number, pdev->devfn);
-		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
-	}
-
-	if (!(pdiva = diva_driver_add_card(pdev, ent->driver_data))) {
-		DBG_TRC(("%s: %s bus: %08x fn: %08x card init failed.\n",
-			 DRIVERLNAME,
-			 CardProperties[ent->driver_data].Name,
-			 pdev->bus->number,
-			 pdev->devfn))
-			printk(KERN_ERR
-			       "%s: %s bus: %08x fn: %08x card init failed.\n",
-			       DRIVERLNAME,
-			       CardProperties[ent->driver_data].
-			       Name, pdev->bus->number,
-			       pdev->devfn);
-		return (-EIO);
-	}
-
-	pci_set_drvdata(pdev, pdiva);
-
-	return (0);
-}
-
-static void divas_remove_one(struct pci_dev *pdev)
-{
-	void *pdiva = pci_get_drvdata(pdev);
-
-	DBG_TRC(("bus: %08x fn: %08x removal.\n",
-		 pdev->bus->number, pdev->devfn))
-		printk(KERN_INFO "%s: bus: %08x fn: %08x removal.\n",
-		       DRIVERLNAME, pdev->bus->number, pdev->devfn);
-
-	if (pdiva) {
-		diva_driver_remove_card(pdiva);
-	}
-
-}
-
-/* --------------------------------------------------------------------------
-   Driver Load / Startup
-   -------------------------------------------------------------------------- */
-static int __init divas_init(void)
-{
-	char tmprev[50];
-	int ret = 0;
-
-	printk(KERN_INFO "%s\n", DRIVERNAME);
-	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_DIVAS);
-	strcpy(tmprev, main_revision);
-	printk("%s  Build: %s(%s)\n", getrev(tmprev),
-	       diva_xdi_common_code_build, DIVA_BUILD);
-	printk(KERN_INFO "%s: support for: ", DRIVERLNAME);
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
-	printk("BRI/PCI ");
-#endif
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
-	printk("PRI/PCI ");
-#endif
-	printk("adapters\n");
-
-	if (!divasfunc_init(dbgmask)) {
-		printk(KERN_ERR "%s: failed to connect to DIDD.\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!divas_register_chrdev()) {
-#ifdef MODULE
-		divasfunc_exit();
-#endif
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!create_divas_proc()) {
-#ifdef MODULE
-		divas_unregister_chrdev();
-		divasfunc_exit();
-#endif
-		printk(KERN_ERR "%s: failed to create proc entry.\n",
-		       DRIVERLNAME);
-		ret = -EIO;
-		goto out;
-	}
-
-	if ((ret = pci_register_driver(&diva_pci_driver))) {
-#ifdef MODULE
-		remove_divas_proc();
-		divas_unregister_chrdev();
-		divasfunc_exit();
-#endif
-		printk(KERN_ERR "%s: failed to init pci driver.\n",
-		       DRIVERLNAME);
-		goto out;
-	}
-	printk(KERN_INFO "%s: started with major %d\n", DRIVERLNAME, major);
-
-out:
-	return (ret);
-}
-
-/* --------------------------------------------------------------------------
-   Driver Unload
-   -------------------------------------------------------------------------- */
-static void __exit divas_exit(void)
-{
-	pci_unregister_driver(&diva_pci_driver);
-	remove_divas_proc();
-	divas_unregister_chrdev();
-	divasfunc_exit();
-
-	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divas_init);
-module_exit(divas_exit);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
deleted file mode 100644
index f52f462..0000000
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ /dev/null
@@ -1,412 +0,0 @@
-/* $Id: divasproc.c,v 1.19.4.3 2005/01/31 12:22:20 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- * /proc functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "debuglib.h"
-#undef ID_MASK
-#undef N_DATA
-#include "pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "di.h"
-#include "io.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "diva.h"
-#include "diva_pci.h"
-
-
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-extern void divas_get_version(char *);
-extern void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf);
-
-/*********************************************************
- ** Functions for /proc interface / File operations
- *********************************************************/
-
-static char *divas_proc_name = "divas";
-static char *adapter_dir_name = "adapter";
-static char *info_proc_name = "info";
-static char *grp_opt_proc_name = "group_optimization";
-static char *d_l1_down_proc_name = "dynamic_l1_down";
-
-/*
-** "divas" entry
-*/
-
-extern struct proc_dir_entry *proc_net_eicon;
-static struct proc_dir_entry *divas_proc_entry = NULL;
-
-static ssize_t
-divas_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
-	int len = 0;
-	int cadapter;
-	char tmpbuf[80];
-	char tmpser[16];
-
-	if (*off)
-		return 0;
-
-	divas_get_version(tmpbuf);
-	if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
-		return -EFAULT;
-	len += strlen(tmpbuf);
-
-	for (cadapter = 0; cadapter < MAX_ADAPTER; cadapter++) {
-		if (IoAdapters[cadapter]) {
-			diva_get_vserial_number(IoAdapters[cadapter],
-						tmpser);
-			sprintf(tmpbuf,
-				"%2d: %-30s Serial:%-10s IRQ:%2d\n",
-				cadapter + 1,
-				IoAdapters[cadapter]->Properties.Name,
-				tmpser,
-				IoAdapters[cadapter]->irq_info.irq_nr);
-			if ((strlen(tmpbuf) + len) > count)
-				break;
-			if (copy_to_user
-			    (buf + len, &tmpbuf,
-			     strlen(tmpbuf))) return -EFAULT;
-			len += strlen(tmpbuf);
-		}
-	}
-
-	*off += len;
-	return (len);
-}
-
-static ssize_t
-divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
-	return (-ENODEV);
-}
-
-static __poll_t divas_poll(struct file *file, poll_table *wait)
-{
-	return (EPOLLERR);
-}
-
-static int divas_open(struct inode *inode, struct file *file)
-{
-	return nonseekable_open(inode, file);
-}
-
-static int divas_close(struct inode *inode, struct file *file)
-{
-	return (0);
-}
-
-static const struct file_operations divas_fops = {
-	.owner   = THIS_MODULE,
-	.llseek  = no_llseek,
-	.read    = divas_read,
-	.write   = divas_write,
-	.poll    = divas_poll,
-	.open    = divas_open,
-	.release = divas_close
-};
-
-int create_divas_proc(void)
-{
-	divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO,
-				       proc_net_eicon, &divas_fops);
-	if (!divas_proc_entry)
-		return (0);
-
-	return (1);
-}
-
-void remove_divas_proc(void)
-{
-	if (divas_proc_entry) {
-		remove_proc_entry(divas_proc_name, proc_net_eicon);
-		divas_proc_entry = NULL;
-	}
-}
-
-static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
-				  size_t count, loff_t *pos)
-{
-	diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
-	if ((count == 1) || (count == 2)) {
-		char c;
-		if (get_user(c, buffer))
-			return -EFAULT;
-		switch (c) {
-		case '0':
-			IoAdapter->capi_cfg.cfg_1 &=
-				~DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
-			break;
-		case '1':
-			IoAdapter->capi_cfg.cfg_1 |=
-				DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
-			break;
-		default:
-			return (-EINVAL);
-		}
-		return (count);
-	}
-	return (-EINVAL);
-}
-
-static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
-				    size_t count, loff_t *pos)
-{
-	diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
-	if ((count == 1) || (count == 2)) {
-		char c;
-		if (get_user(c, buffer))
-			return -EFAULT;
-		switch (c) {
-		case '0':
-			IoAdapter->capi_cfg.cfg_1 &=
-				~DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
-			break;
-		case '1':
-			IoAdapter->capi_cfg.cfg_1 |=
-				DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
-			break;
-		default:
-			return (-EINVAL);
-		}
-		return (count);
-	}
-	return (-EINVAL);
-}
-
-static int d_l1_down_proc_show(struct seq_file *m, void *v)
-{
-	diva_os_xdi_adapter_t *a = m->private;
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
-	seq_printf(m, "%s\n",
-		   (IoAdapter->capi_cfg.
-		    cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
-		   "0");
-	return 0;
-}
-
-static int d_l1_down_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, d_l1_down_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations d_l1_down_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= d_l1_down_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= d_l1_down_proc_write,
-};
-
-static int grp_opt_proc_show(struct seq_file *m, void *v)
-{
-	diva_os_xdi_adapter_t *a = m->private;
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
-	seq_printf(m, "%s\n",
-		   (IoAdapter->capi_cfg.
-		    cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
-		   ? "1" : "0");
-	return 0;
-}
-
-static int grp_opt_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, grp_opt_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations grp_opt_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= grp_opt_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= grp_opt_proc_write,
-};
-
-static ssize_t info_proc_write(struct file *file, const char __user *buffer,
-			       size_t count, loff_t *pos)
-{
-	diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-	char c[4];
-
-	if (count <= 4)
-		return -EINVAL;
-
-	if (copy_from_user(c, buffer, 4))
-		return -EFAULT;
-
-	/* this is for test purposes only */
-	if (!memcmp(c, "trap", 4)) {
-		(*(IoAdapter->os_trap_nfy_Fnc)) (IoAdapter, IoAdapter->ANum);
-		return (count);
-	}
-	return (-EINVAL);
-}
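The "trap" hook above can be exercised from user space by writing the literal string to the adapter's info node; info_proc_write() insists on count > 4, so the trailing newline matters. A hedged example - the /proc path and the adapter1 directory name are assumptions for illustration (the eicon proc directory is created elsewhere, in divadidd):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Path is illustrative only; adjust to the real eicon proc dir. */
	int fd = open("/proc/net/eicon/adapter1/info", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "trap\n" is 5 bytes, satisfying the count > 4 check above. */
	if (write(fd, "trap\n", 5) != 5) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}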
-
-static int info_proc_show(struct seq_file *m, void *v)
-{
-	int i = 0;
-	char *p;
-	char tmpser[16];
-	diva_os_xdi_adapter_t *a = m->private;
-	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
-	seq_printf(m, "Name        : %s\n", IoAdapter->Properties.Name);
-	seq_printf(m, "DSP state   : %08x\n", a->dsp_mask);
-	seq_printf(m, "Channels    : %02d\n", IoAdapter->Properties.Channels);
-	seq_printf(m, "E. max/used : %03d/%03d\n",
-		   IoAdapter->e_max, IoAdapter->e_count);
-	diva_get_vserial_number(IoAdapter, tmpser);
-	seq_printf(m, "Serial      : %s\n", tmpser);
-	seq_printf(m, "IRQ         : %d\n", IoAdapter->irq_info.irq_nr);
-	seq_printf(m, "CardIndex   : %d\n", a->CardIndex);
-	seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
-	seq_printf(m, "Controller  : %d\n", a->controller);
-	seq_printf(m, "Bus-Type    : %s\n",
-		   (a->Bus ==
-		    DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
-	seq_printf(m, "Port-Name   : %s\n", a->port_name);
-	if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
-		seq_printf(m, "PCI-bus     : %d\n", a->resources.pci.bus);
-		seq_printf(m, "PCI-func    : %d\n", a->resources.pci.func);
-		for (i = 0; i < 8; i++) {
-			if (a->resources.pci.bar[i]) {
-				seq_printf(m,
-					   "Mem / I/O %d : 0x%x / mapped : 0x%lx",
-					   i, a->resources.pci.bar[i],
-					   (unsigned long) a->resources.
-					   pci.addr[i]);
-				if (a->resources.pci.length[i]) {
-					seq_printf(m,
-						   " / length : %d",
-						   a->resources.pci.
-						   length[i]);
-				}
-				seq_putc(m, '\n');
-			}
-		}
-	}
-	if ((!a->xdi_adapter.port) &&
-	    ((!a->xdi_adapter.ram) ||
-	     (!a->xdi_adapter.reset)
-	     || (!a->xdi_adapter.cfg))) {
-		if (!IoAdapter->irq_info.irq_nr) {
-			p = "slave";
-		} else {
-			p = "out of service";
-		}
-	} else if (a->xdi_adapter.trapped) {
-		p = "trapped";
-	} else if (a->xdi_adapter.Initialized) {
-		p = "active";
-	} else {
-		p = "ready";
-	}
-	seq_printf(m, "State       : %s\n", p);
-
-	return 0;
-}
-
-static int info_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, info_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations info_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= info_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= info_proc_write,
-};
-
-/*
-** adapter proc init/de-init
-*/
-
-/* --------------------------------------------------------------------------
-   Create adapter directory and files in proc file system
-   -------------------------------------------------------------------------- */
-int create_adapter_proc(diva_os_xdi_adapter_t *a)
-{
-	struct proc_dir_entry *de, *pe;
-	char tmp[16];
-
-	sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
-	if (!(de = proc_mkdir(tmp, proc_net_eicon)))
-		return (0);
-	a->proc_adapter_dir = (void *) de;
-
-	pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
-			      &info_proc_fops, a);
-	if (!pe)
-		return (0);
-	a->proc_info = (void *) pe;
-
-	pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
-			      &grp_opt_proc_fops, a);
-	if (pe)
-		a->proc_grp_opt = (void *) pe;
-	pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
-			      &d_l1_down_proc_fops, a);
-	if (pe)
-		a->proc_d_l1_down = (void *) pe;
-
-	DBG_TRC(("proc entry %s created", tmp));
-
-	return (1);
-}
-
-/* --------------------------------------------------------------------------
-   Remove adapter directory and files in proc file system
-   -------------------------------------------------------------------------- */
-void remove_adapter_proc(diva_os_xdi_adapter_t *a)
-{
-	char tmp[16];
-
-	if (a->proc_adapter_dir) {
-		if (a->proc_d_l1_down) {
-			remove_proc_entry(d_l1_down_proc_name,
-					  (struct proc_dir_entry *) a->proc_adapter_dir);
-		}
-		if (a->proc_grp_opt) {
-			remove_proc_entry(grp_opt_proc_name,
-					  (struct proc_dir_entry *) a->proc_adapter_dir);
-		}
-		if (a->proc_info) {
-			remove_proc_entry(info_proc_name,
-					  (struct proc_dir_entry *) a->proc_adapter_dir);
-		}
-		sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
-		remove_proc_entry(tmp, proc_net_eicon);
-		DBG_TRC(("proc entry %s%d removed", adapter_dir_name,
-			 a->controller));
-	}
-}
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
deleted file mode 100644
index dd6b53a..0000000
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ /dev/null
@@ -1,489 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_SYNC__H
-#define __DIVA_SYNC__H
-#define IDI_SYNC_REQ_REMOVE             0x00
-#define IDI_SYNC_REQ_GET_NAME           0x01
-#define IDI_SYNC_REQ_GET_SERIAL         0x02
-#define IDI_SYNC_REQ_SET_POSTCALL       0x03
-#define IDI_SYNC_REQ_GET_XLOG           0x04
-#define IDI_SYNC_REQ_GET_FEATURES       0x05
-#define IDI_SYNC_REQ_USB_REGISTER       0x06
-#define IDI_SYNC_REQ_USB_RELEASE        0x07
-#define IDI_SYNC_REQ_USB_ADD_DEVICE     0x08
-#define IDI_SYNC_REQ_USB_START_DEVICE   0x09
-#define IDI_SYNC_REQ_USB_STOP_DEVICE    0x0A
-#define IDI_SYNC_REQ_USB_REMOVE_DEVICE  0x0B
-#define IDI_SYNC_REQ_GET_CARDTYPE       0x0C
-#define IDI_SYNC_REQ_GET_DBG_XLOG       0x0D
-#define DIVA_USB
-#define DIVA_USB_REQ                    0xAC
-#define DIVA_USB_TEST                   0xAB
-#define DIVA_USB_ADD_ADAPTER            0xAC
-#define DIVA_USB_REMOVE_ADAPTER         0xAD
-#define IDI_SYNC_REQ_SERIAL_HOOK        0x80
-#define IDI_SYNC_REQ_XCHANGE_STATUS     0x81
-#define IDI_SYNC_REQ_USB_HOOK           0x82
-#define IDI_SYNC_REQ_PORTDRV_HOOK       0x83
-#define IDI_SYNC_REQ_SLI                0x84   /*  SLI request from 3signal modem drivers */
-#define IDI_SYNC_REQ_RECONFIGURE        0x85
-#define IDI_SYNC_REQ_RESET              0x86
-#define IDI_SYNC_REQ_GET_85X_DEVICE_DATA     0x87
-#define IDI_SYNC_REQ_LOCK_85X                   0x88
-#define IDI_SYNC_REQ_DIVA_85X_USB_DATA_EXCHANGE 0x99
-#define IDI_SYNC_REQ_DIPORT_EXCHANGE_REQ   0x98
-#define IDI_SYNC_REQ_GET_85X_EXT_PORT_TYPE      0xA0
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES  0x92
-/*
-  To receive the XDI features:
-  1. set 'buffer_length_in_bytes' to the length of your buffer
-  2. set 'features' to a pointer to your buffer
-  3. issue the synchronous request to XDI
-  4. check that the feature 'DIVA_XDI_EXTENDED_FEATURES_VALID' is present
-  after the call. This feature indicates that your request was processed
-  and that XDI supports this synchronous request
-  5. if on return bit 31 (0x80000000) of 'buffer_length_in_bytes' is set,
-  the provided buffer was too small and bits 30-0 contain the required
-  buffer length. In this case only the features that fit into the buffer
-  are indicated to the caller.
-  (A sketch of this call sequence follows the feature flag definitions
-  below.)
-*/
-typedef struct _diva_xdi_get_extended_xdi_features {
-	dword buffer_length_in_bytes;
-	byte  *features;
-} diva_xdi_get_extended_xdi_features_t;
-/*
-  features[0]
-*/
-#define DIVA_XDI_EXTENDED_FEATURES_VALID          0x01
-#define DIVA_XDI_EXTENDED_FEATURE_CMA             0x02
-#define DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR       0x04
-#define DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS       0x08
-#define DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC    0x10
-#define DIVA_XDI_EXTENDED_FEATURE_RX_DMA          0x20
-#define DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA  0x40
-#define DIVA_XDI_EXTENDED_FEATURE_WIDE_ID         0x80
-#define DIVA_XDI_EXTENDED_FEATURES_MAX_SZ    1
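A hedged sketch of the request sequence described in the comment above. It assumes the caller already holds a DESCRIPTOR 'd' for the target adapter (as obtained from DIDD) and that its request entry point is valid; the helper name and return convention are illustrative only:

static int diva_query_xdi_features(DESCRIPTOR *d, byte *features, dword len)
{
	IDI_SYNC_REQ sync_req;

	memset(features, 0, len);

	/* Steps 1-3: describe the buffer and issue the synchronous request. */
	sync_req.xdi_extended_features.Req = 0;
	sync_req.xdi_extended_features.Rc =
		IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
	sync_req.xdi_extended_features.info.buffer_length_in_bytes = len;
	sync_req.xdi_extended_features.info.features = features;
	(*d->request)((ENTITY *)&sync_req);

	/* Step 4: the VALID bit confirms XDI understood the request. */
	if (!(features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID))
		return -1;

	/* Step 5: bit 31 of the returned length flags a too-small buffer. */
	if (sync_req.xdi_extended_features.info.buffer_length_in_bytes &
	    0x80000000)
		return -1;

	return 0;
}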
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR   0x93
-typedef struct _diva_xdi_get_adapter_sdram_bar {
-	dword bar;
-} diva_xdi_get_adapter_sdram_bar_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS   0x94
-/*
-  CAPI Parameters will be written in the caller's buffer
-*/
-typedef struct _diva_xdi_get_capi_parameters {
-	dword structure_length;
-	byte flag_dynamic_l1_down;
-	byte group_optimization_enabled;
-} diva_xdi_get_capi_parameters_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER   0x95
-/*
-  Get the logical adapter number, as assigned by XDI.
-  'controller' is the zero-based 'sub' controller number in case of
-  one adapter that supports multiple interfaces.
-  'controller' is zero for the master adapter (and for adapters that
-  support only one interface).
-*/
-typedef struct _diva_xdi_get_logical_adapter_number {
-	dword logical_adapter_number;
-	dword controller;
-	dword total_controllers;
-} diva_xdi_get_logical_adapter_number_s_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_UP1DM_OPERATION   0x96
-/******************************************************************************/
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION 0x97
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC     0x01
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE      0x02
-typedef struct _diva_xdi_dma_descriptor_operation {
-	int operation;
-	int descriptor_number;
-	void *descriptor_address;
-	dword descriptor_magic;
-} diva_xdi_dma_descriptor_operation_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY   0x01
-#define IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY     0x02
-#define IDI_SYNC_REQ_DIDD_ADD_ADAPTER               0x03
-#define IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER            0x04
-#define IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY        0x05
-#define IDI_SYNC_REQ_DIDD_GET_CFG_LIB_IFC           0x10
-typedef struct _diva_didd_adapter_notify {
-	dword handle; /* Notification handle */
-	void *callback;
-	void *context;
-} diva_didd_adapter_notify_t;
-typedef struct _diva_didd_add_adapter {
-	void *descriptor;
-} diva_didd_add_adapter_t;
-typedef struct _diva_didd_remove_adapter {
-	IDI_CALL p_request;
-} diva_didd_remove_adapter_t;
-typedef struct _diva_didd_read_adapter_array {
-	void *buffer;
-	dword length;
-} diva_didd_read_adapter_array_t;
-typedef struct _diva_didd_get_cfg_lib_ifc {
-	void *ifc;
-} diva_didd_get_cfg_lib_ifc_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_STREAM    0x91
-#define DIVA_XDI_SYNCHRONOUS_SERVICE   0x01
-#define DIVA_XDI_DMA_SERVICE           0x02
-#define DIVA_XDI_AUTO_SERVICE          0x03
-#define DIVA_ISTREAM_COMPLETE_NOTIFY   0
-#define DIVA_ISTREAM_COMPLETE_READ     1
-#define DIVA_ISTREAM_COMPLETE_WRITE    2
-typedef struct _diva_xdi_stream_interface {
-	unsigned char  Id;                 /* filled by XDI client */
-	unsigned char provided_service;    /* filled by XDI        */
-	unsigned char requested_service;   /* filled by XDI Client */
-	void *xdi_context;    /* filled by XDI */
-	void *client_context;   /* filled by XDI client */
-	int (*write)(void *context,
-		     int Id,
-		     void *data,
-		     int length,
-		     int final,
-		     byte usr1,
-		     byte usr2);
-	int (*read)(void *context,
-		    int Id,
-		    void *data,
-		    int max_length,
-		    int *final,
-		    byte *usr1,
-		    byte *usr2);
-	int (*complete)(void *client_context,
-			int Id,
-			int what,
-			void *data,
-			int length,
-			int *final);
-} diva_xdi_stream_interface_t;
-/******************************************************************************/
-/*
- * IDI_SYNC_REQ_SERIAL_HOOK - special interface for the DIVA Mobile card
- */
-typedef struct
-{ unsigned char LineState;         /* Modem line state (STATUS_R) */
-#define SERIAL_GSM_CELL 0x01   /* GSM or CELL cable attached  */
-	unsigned char CardState;          /* PCMCIA card state (0 = down) */
-	unsigned char IsdnState;          /* ISDN layer 1 state (0 = down)*/
-	unsigned char HookState;          /* current logical hook state */
-#define SERIAL_ON_HOOK 0x02   /* set in DIVA CTRL_R register */
-} SERIAL_STATE;
-typedef int (*SERIAL_INT_CB)(void *Context);
-typedef int (*SERIAL_DPC_CB)(void *Context);
-typedef unsigned char (*SERIAL_I_SYNC)(void *Context);
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
-	unsigned char Req;             /* request (must be always 0) */
-	unsigned char Rc;              /* return code (is the request) */
-	unsigned char Function;           /* private function code  */
-#define SERIAL_HOOK_ATTACH 0x81
-#define SERIAL_HOOK_STATUS 0x82
-#define SERIAL_HOOK_I_SYNC 0x83
-#define SERIAL_HOOK_NOECHO 0x84
-#define SERIAL_HOOK_RING 0x85
-#define SERIAL_HOOK_DETACH 0x8f
-	unsigned char Flags;           /* function refinements   */
-	/* parameters passed by the ATTACH request      */
-	SERIAL_INT_CB InterruptHandler; /* called on each interrupt  */
-	SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */
-	void   *HandlerContext; /* context for both handlers */
-	/* return values for both the ATTACH and the STATUS request   */
-	unsigned long IoBase;    /* IO port assigned to UART  */
-	SERIAL_STATE State;
-	/* parameters and return values for the I_SYNC function    */
-	SERIAL_I_SYNC SyncFunction;  /* to be called synchronized */
-	void   *SyncContext;  /* context for this function */
-	unsigned char SyncResult;   /* return value of function  */
-} SERIAL_HOOK;
-/*
- * IDI_SYNC_REQ_XCHANGE_STATUS - exchange the status between IDI and WMP
- * IDI_SYNC_REQ_RECONFIGURE - reconfiguration of IDI from WMP
- */
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
-	unsigned char Req;             /* request (must be always 0) */
-	unsigned char Rc;              /* return code (is the request) */
-#define DRIVER_STATUS_BOOT  0xA1
-#define DRIVER_STATUS_INIT_DEV 0xA2
-#define DRIVER_STATUS_RUNNING 0xA3
-#define DRIVER_STATUS_SHUTDOWN 0xAF
-#define DRIVER_STATUS_TRAPPED 0xAE
-	unsigned char wmpStatus;          /* exported by WMP              */
-	unsigned char idiStatus;   /* exported by IDI              */
-	unsigned long wizProto;   /* from WMP registry to IDI     */
-	/* the cardtype value is defined by cardtype.h */
-	unsigned long cardType;   /* from IDI registry to WMP     */
-	unsigned long nt2;    /* from IDI registry to WMP     */
-	unsigned long permanent;   /* from IDI registry to WMP     */
-	unsigned long stableL2;   /* from IDI registry to WMP     */
-	unsigned long tei;    /* from IDI registry to WMP     */
-#define CRC4_MASK   0x00000003
-#define L1_TRISTATE_MASK 0x00000004
-#define WATCHDOG_MASK  0x00000008
-#define NO_ORDER_CHECK_MASK 0x00000010
-#define LOW_CHANNEL_MASK 0x00000020
-#define NO_HSCX30_MASK  0x00000040
-#define SET_BOARD   0x00001000
-#define SET_CRC4   0x00030000
-#define SET_L1_TRISTATE  0x00040000
-#define SET_WATCHDOG  0x00080000
-#define SET_NO_ORDER_CHECK 0x00100000
-#define SET_LOW_CHANNEL  0x00200000
-#define SET_NO_HSCX30  0x00400000
-#define SET_MODE   0x00800000
-#define SET_PROTO   0x02000000
-#define SET_CARDTYPE  0x04000000
-#define SET_NT2    0x08000000
-#define SET_PERMANENT  0x10000000
-#define SET_STABLEL2  0x20000000
-#define SET_TEI    0x40000000
-#define SET_NUMBERLEN  0x80000000
-	unsigned long Flag;  /* |31-Type-16|15-Mask-0| */
-	unsigned long NumberLen; /* reconfiguration: union is empty */
-	union {
-		struct {    /* possible reconfiguration, but ... ; SET_BOARD */
-			unsigned long SerialNumber;
-			char     *pCardname; /* di_defs.h: BOARD_NAME_LENGTH */
-		} board;
-		struct {      /* reset: need resources */
-			void *pRawResources;
-			void *pXlatResources;
-		} res;
-		struct { /* reconfiguration: wizProto == PROTTYPE_RBSCAS */
-#define GLARE_RESOLVE_MASK 0x00000001
-#define DID_MASK   0x00000002
-#define BEARER_CAP_MASK  0x0000000c
-#define SET_GLARE_RESOLVE 0x00010000
-#define SET_DID    0x00020000
-#define SET_BEARER_CAP  0x000c0000
-			unsigned long Flag;  /* |31-Type-16|15-VALUE-0| */
-			unsigned short DigitTimeout;
-			unsigned short AnswerDelay;
-		} rbs;
-		struct { /* reconfiguration: wizProto == PROTTYPE_QSIG */
-#define CALL_REF_LENGTH1_MASK 0x00000001
-#define BRI_CHANNEL_ID_MASK  0x00000002
-#define SET_CALL_REF_LENGTH  0x00010000
-#define SET_BRI_CHANNEL_ID  0x00020000
-			unsigned long Flag;  /* |31-Type-16|15-VALUE-0| */
-		} qsig;
-		struct { /* reconfiguration: NumberLen != 0 */
-#define SET_SPID1   0x00010000
-#define SET_NUMBER1   0x00020000
-#define SET_SUBADDRESS1  0x00040000
-#define SET_SPID2   0x00100000
-#define SET_NUMBER2   0x00200000
-#define SET_SUBADDRESS2  0x00400000
-#define MASK_SET   0xffff0000
-			unsigned long Flag;   /* |31-Type-16|15-Channel-0| */
-			unsigned char *pBuffer; /* number value */
-		} isdnNo;
-	}
-		parms
-		;
-} isdnProps;
-/*
- * IDI_SYNC_REQ_PORTDRV_HOOK - signal plug/unplug (Award Cardware only)
- */
-typedef void (*PORTDRV_HOOK_CB)(void *Context, int Plug);
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
-	unsigned char Req;             /* request (must be always 0) */
-	unsigned char Rc;              /* return code (is the request) */
-	unsigned char Function;           /* private function code  */
-	unsigned char Flags;           /* function refinements   */
-	PORTDRV_HOOK_CB Callback;   /* to be called on plug/unplug */
-	void   *Context;   /* context for callback   */
-	unsigned long Info;    /* more info if needed   */
-} PORTDRV_HOOK;
-/*  Codes for the 'Rc' element in structure below. */
-#define SLI_INSTALL     (0xA1)
-#define SLI_UNINSTALL   (0xA2)
-typedef int (*SLIENTRYPOINT)(void *p3SignalAPI, void *pContext);
-typedef struct
-{   /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
-	unsigned char   Req;                /* request (must be always 0)   */
-	unsigned char   Rc;                 /* return code (is the request) */
-	unsigned char   Function;           /* private function code        */
-	unsigned char   Flags;              /* function refinements         */
-	SLIENTRYPOINT   Callback;           /* to be called on plug/unplug  */
-	void            *Context;           /* context for callback         */
-	unsigned long   Info;               /* more info if needed          */
-} SLIENTRYPOINT_REQ;
-/******************************************************************************/
-/*
- *  Definitions for DIVA USB
- */
-typedef int (*USB_SEND_REQ)(unsigned char PipeIndex, unsigned char Type, void *Data, int sizeData);
-typedef int (*USB_START_DEV)(void *Adapter, void *Ipac);
-/* called from WDM */
-typedef void (*USB_RECV_NOTIFY)(void *Ipac, void *msg);
-typedef void (*USB_XMIT_NOTIFY)(void *Ipac, unsigned char PipeIndex);
-/******************************************************************************/
-/*
- * Parameter description for synchronous requests.
- *
- * Sorry, must repeat some parts of di_defs.h here because
- * they are not defined for all operating environments
- */
-typedef union
-{ ENTITY Entity;
-	struct
-	{ /* 'Req' and 'Rc' are at the same place as in the ENTITY struct */
-		unsigned char   Req; /* request (must be always 0) */
-		unsigned char   Rc;  /* return code (is the request) */
-	}   Request;
-	struct
-	{ unsigned char   Req; /* request (must be always 0) */
-		unsigned char   Rc;  /* return code (0x01)   */
-		unsigned char   name[BOARD_NAME_LENGTH];
-	}   GetName;
-	struct
-	{ unsigned char   Req; /* request (must be always 0) */
-		unsigned char   Rc;  /* return code (0x02)   */
-		unsigned long   serial; /* serial number    */
-	}   GetSerial;
-	struct
-	{ unsigned char   Req; /* request (must be always 0) */
-		unsigned char   Rc;  /* return code (0x02)   */
-		unsigned long   lineIdx;/* line, 0 if card has only one */
-	}   GetLineIdx;
-	struct
-	{ unsigned char  Req;     /* request (must be always 0) */
-		unsigned char  Rc;      /* return code (0x02)   */
-		unsigned long  cardtype;/* card type        */
-	}   GetCardType;
-	struct
-	{ unsigned short command;/* command = 0x0300 */
-		unsigned short dummy; /* not used */
-		IDI_CALL       callback;/* routine to call back */
-		ENTITY      *contxt; /* ptr to entity to use */
-	}   PostCall;
-	struct
-	{ unsigned char  Req;  /* request (must be always 0) */
-		unsigned char  Rc;   /* return code (0x04)   */
-		unsigned char  pcm[1]; /* buffer (a pc_maint struct) */
-	}   GetXlog;
-	struct
-	{ unsigned char  Req;  /* request (must be always 0) */
-		unsigned char  Rc;   /* return code (0x05)   */
-		unsigned short features;/* feature defines see below */
-	}   GetFeatures;
-	SERIAL_HOOK  SerialHook;
-/* Added for DIVA USB */
-	struct
-	{ unsigned char   Req;
-		unsigned char   Rc;
-		USB_SEND_REQ    UsbSendRequest; /* function in Diva Usb WDM driver in usb_os.c, */
-		/* called from usb_drv.c to send a message to our device */
-		/* eg UsbSendRequest (USB_PIPE_SIGNAL, USB_IPAC_START, 0, 0); */
-		USB_RECV_NOTIFY usb_recv;       /* called from usb_os.c to pass a received message and ptr to IPAC */
-		/* on to usb_drv.c by a call to usb_recv(). */
-		USB_XMIT_NOTIFY usb_xmit;       /* called from usb_os.c in DivaUSB.sys WDM to indicate a completed transmit */
-		/* to usb_drv.c by a call to usb_xmit(). */
-		USB_START_DEV   UsbStartDevice; /* Start the USB Device, in usb_os.c */
-		IDI_CALL        callback;       /* routine to call back */
-		ENTITY          *contxt;     /* ptr to entity to use */
-		void **ipac_ptr;    /* pointer to struct IPAC in VxD */
-	} Usb_Msg_old;
-/* message used by WDM and VXD to pass pointers of function and IPAC* */
-	struct
-	{ unsigned char Req;
-		unsigned char Rc;
-		USB_SEND_REQ    pUsbSendRequest;/* function in Diva Usb WDM driver in usb_os.c, */
-		/* called from usb_drv.c to send a message to our device */
-		/* eg UsbSendRequest (USB_PIPE_SIGNAL, USB_IPAC_START, 0, 0); */
-		USB_RECV_NOTIFY p_usb_recv;     /* called from usb_os.c to pass a received message and ptr to IPAC */
-		/* on to usb_drv.c by a call to usb_recv(). */
-		USB_XMIT_NOTIFY p_usb_xmit;     /* called from usb_os.c in DivaUSB.sys WDM to indicate a completed transmit */
-		/* to usb_drv.c by a call to usb_xmit().*/
-		void            *ipac_ptr;      /* &Diva.ipac pointer to struct IPAC in VxD */
-	} Usb_Msg;
-	PORTDRV_HOOK PortdrvHook;
-	SLIENTRYPOINT_REQ   sliEntryPointReq;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_stream_interface_t info;
-	} xdi_stream_info;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_get_extended_xdi_features_t info;
-	} xdi_extended_features;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_get_adapter_sdram_bar_t info;
-	} xdi_sdram_bar;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_get_capi_parameters_t info;
-	} xdi_capi_prms;
-	struct {
-		ENTITY           e;
-		diva_didd_adapter_notify_t info;
-	} didd_notify;
-	struct {
-		ENTITY           e;
-		diva_didd_add_adapter_t   info;
-	} didd_add_adapter;
-	struct {
-		ENTITY           e;
-		diva_didd_remove_adapter_t info;
-	} didd_remove_adapter;
-	struct {
-		ENTITY             e;
-		diva_didd_read_adapter_array_t info;
-	} didd_read_adapter_array;
-	struct {
-		ENTITY             e;
-		diva_didd_get_cfg_lib_ifc_t     info;
-	} didd_get_cfg_lib_ifc;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_get_logical_adapter_number_s_t info;
-	} xdi_logical_adapter_number;
-	struct {
-		unsigned char Req;
-		unsigned char Rc;
-		diva_xdi_dma_descriptor_operation_t info;
-	} xdi_dma_descriptor_operation;
-} IDI_SYNC_REQ;
-/******************************************************************************/
-#endif /* __DIVA_SYNC__H */
diff --git a/drivers/isdn/hardware/eicon/dqueue.c b/drivers/isdn/hardware/eicon/dqueue.c
deleted file mode 100644
index 7958a25..0000000
--- a/drivers/isdn/hardware/eicon/dqueue.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* $Id: dqueue.c,v 1.5 2003/04/12 21:40:49 schindler Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "dqueue.h"
-
-int
-diva_data_q_init(diva_um_idi_data_queue_t *q,
-		 int max_length, int max_segments)
-{
-	int i;
-
-	q->max_length = max_length;
-	q->segments = max_segments;
-
-	for (i = 0; i < q->segments; i++) {
-		q->data[i] = NULL;
-		q->length[i] = 0;
-	}
-	q->read = q->write = q->count = q->segment_pending = 0;
-
-	for (i = 0; i < q->segments; i++) {
-		if (!(q->data[i] = diva_os_malloc(0, q->max_length))) {
-			diva_data_q_finit(q);
-			return (-1);
-		}
-	}
-
-	return (0);
-}
-
-int diva_data_q_finit(diva_um_idi_data_queue_t *q)
-{
-	int i;
-
-	for (i = 0; i < q->segments; i++) {
-		if (q->data[i]) {
-			diva_os_free(0, q->data[i]);
-		}
-		q->data[i] = NULL;
-		q->length[i] = 0;
-	}
-	q->read = q->write = q->count = q->segment_pending = 0;
-
-	return (0);
-}
-
-int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q)
-{
-	return (q->max_length);
-}
-
-void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q)
-{
-	if ((!q->segment_pending) && (q->count < q->segments)) {
-		q->segment_pending = 1;
-		return (q->data[q->write]);
-	}
-
-	return NULL;
-}
-
-void
-diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q, int length)
-{
-	if (q->segment_pending) {
-		q->length[q->write] = length;
-		q->count++;
-		q->write++;
-		if (q->write >= q->segments) {
-			q->write = 0;
-		}
-		q->segment_pending = 0;
-	}
-}
-
-const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t *
-					 q)
-{
-	if (q->count) {
-		return (q->data[q->read]);
-	}
-	return NULL;
-}
-
-int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q)
-{
-	return (q->length[q->read]);
-}
-
-void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q)
-{
-	if (q->count) {
-		q->length[q->read] = 0;
-		q->count--;
-		q->read++;
-		if (q->read >= q->segments) {
-			q->read = 0;
-		}
-	}
-}
diff --git a/drivers/isdn/hardware/eicon/dqueue.h b/drivers/isdn/hardware/eicon/dqueue.h
deleted file mode 100644
index 2da9799..0000000
--- a/drivers/isdn/hardware/eicon/dqueue.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: dqueue.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef _DIVA_USER_MODE_IDI_DATA_QUEUE_H__
-#define _DIVA_USER_MODE_IDI_DATA_QUEUE_H__
-
-#define DIVA_UM_IDI_MAX_MSGS 64
-
-typedef struct _diva_um_idi_data_queue {
-	int segments;
-	int max_length;
-	int read;
-	int write;
-	int count;
-	int segment_pending;
-	void *data[DIVA_UM_IDI_MAX_MSGS];
-	int length[DIVA_UM_IDI_MAX_MSGS];
-} diva_um_idi_data_queue_t;
-
-int diva_data_q_init(diva_um_idi_data_queue_t *q,
-		     int max_length, int max_segments);
-int diva_data_q_finit(diva_um_idi_data_queue_t *q);
-int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q);
-void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q);
-void diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q,
-				   int length);
-const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t *
-					 q);
-int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q);
-void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsp_defs.h b/drivers/isdn/hardware/eicon/dsp_defs.h
deleted file mode 100644
index 94828c8..0000000
--- a/drivers/isdn/hardware/eicon/dsp_defs.h
+++ /dev/null
@@ -1,301 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef DSP_DEFS_H_
-#define DSP_DEFS_H_
-#include "dspdids.h"
-/*---------------------------------------------------------------------------*/
-#define dsp_download_reserve_space(fp, length)
-/*****************************************************************************/
-/*
- * OS file access abstraction layer
- *
- * I/O functions return -1 on error, 0 on EOF
- */
-struct _OsFileHandle_;
-typedef long (*OsFileIo)(struct _OsFileHandle_ *handle,
-			 void *buffer,
-			 long size);
-typedef long (*OsFileSeek)(struct _OsFileHandle_ *handle,
-			   long position,
-			   int mode);
-typedef long (*OsCardLoad)(struct _OsFileHandle_    *handle,
-			   long length,
-			   void **addr);
-typedef struct _OsFileHandle_
-{ void       *sysFileDesc;
-	unsigned long sysFileSize;
-	OsFileIo      sysFileRead;
-	OsFileSeek    sysFileSeek;
-	void       *sysLoadDesc;
-	OsCardLoad    sysCardLoad;
-} OsFileHandle;
-extern OsFileHandle *OsOpenFile(char *path_name);
-extern void          OsCloseFile(OsFileHandle *fp);
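A minimal sketch of how a download routine would consume this abstraction while honouring the -1 error / 0 EOF convention stated above; the helper name is illustrative and the real callers live elsewhere in the driver:

static int diva_read_exact(OsFileHandle *fp, void *buffer, long size)
{
	long done = 0;

	while (done < size) {
		long n = fp->sysFileRead(fp, (char *) buffer + done,
					 size - done);

		if (n < 0)	/* -1: I/O error */
			return -1;
		if (n == 0)	/* 0: premature end of file */
			return -1;
		done += n;
	}
	return 0;
}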
-/*****************************************************************************/
-#define DSP_TELINDUS_FILE "dspdload.bin"
-/* special DSP file for BRI cards for Qsig and CornetN, needed because of limited on-board memory */
-#define DSP_QSIG_TELINDUS_FILE "dspdqsig.bin"
-#define DSP_MDM_TELINDUS_FILE "dspdvmdm.bin"
-#define DSP_FAX_TELINDUS_FILE "dspdvfax.bin"
-#define DSP_DIRECTORY_ENTRIES 64
-#define DSP_MEMORY_TYPE_EXTERNAL_DM         0
-#define DSP_MEMORY_TYPE_EXTERNAL_PM         1
-#define DSP_MEMORY_TYPE_INTERNAL_DM         2
-#define DSP_MEMORY_TYPE_INTERNAL_PM         3
-#define DSP_DOWNLOAD_FLAG_BOOTABLE          0x0001
-#define DSP_DOWNLOAD_FLAG_2181              0x0002
-#define DSP_DOWNLOAD_FLAG_TIMECRITICAL      0x0004
-#define DSP_DOWNLOAD_FLAG_COMPAND           0x0008
-#define DSP_MEMORY_BLOCK_COUNT              16
-#define DSP_SEGMENT_PM_FLAG                 0x0001
-#define DSP_SEGMENT_SHARED_FLAG             0x0002
-#define DSP_SEGMENT_EXTERNAL_DM             DSP_MEMORY_TYPE_EXTERNAL_DM
-#define DSP_SEGMENT_EXTERNAL_PM             DSP_MEMORY_TYPE_EXTERNAL_PM
-#define DSP_SEGMENT_INTERNAL_DM             DSP_MEMORY_TYPE_INTERNAL_DM
-#define DSP_SEGMENT_INTERNAL_PM             DSP_MEMORY_TYPE_INTERNAL_PM
-#define DSP_SEGMENT_FIRST_RELOCATABLE       4
-#define DSP_DATA_BLOCK_PM_FLAG              0x0001
-#define DSP_DATA_BLOCK_DWORD_FLAG           0x0002
-#define DSP_DATA_BLOCK_RESOLVE_FLAG         0x0004
-#define DSP_RELOC_NONE                      0x00
-#define DSP_RELOC_SEGMENT_MASK              0x3f
-#define DSP_RELOC_TYPE_MASK                 0xc0
-#define DSP_RELOC_TYPE_0                    0x00  /* relocation of address in DM word / high part of PM word */
-#define DSP_RELOC_TYPE_1                    0x40  /* relocation of address in low part of PM data word */
-#define DSP_RELOC_TYPE_2                    0x80  /* relocation of address in standard command */
-#define DSP_RELOC_TYPE_3                    0xc0  /* relocation of address in call/jump on flag in */
-#define DSP_COMBIFILE_FORMAT_IDENTIFICATION_SIZE 48
-#define DSP_COMBIFILE_FORMAT_VERSION_BCD    0x0100
-#define DSP_FILE_FORMAT_IDENTIFICATION_SIZE 48
-#define DSP_FILE_FORMAT_VERSION_BCD         0x0100
-typedef struct tag_dsp_combifile_header
-{
-	char                  format_identification[DSP_COMBIFILE_FORMAT_IDENTIFICATION_SIZE];
-	word                  format_version_bcd;
-	word                  header_size;
-	word                  combifile_description_size;
-	word                  directory_entries;
-	word                  directory_size;
-	word                  download_count;
-	word                  usage_mask_size;
-} t_dsp_combifile_header;
-typedef struct tag_dsp_combifile_directory_entry
-{
-	word                  card_type_number;
-	word                  file_set_number;
-} t_dsp_combifile_directory_entry;
-typedef struct tag_dsp_file_header
-{
-	char                  format_identification[DSP_FILE_FORMAT_IDENTIFICATION_SIZE];
-	word                  format_version_bcd;
-	word                  download_id;
-	word                  download_flags;
-	word                  required_processing_power;
-	word                  interface_channel_count;
-	word                  header_size;
-	word                  download_description_size;
-	word                  memory_block_table_size;
-	word                  memory_block_count;
-	word                  segment_table_size;
-	word                  segment_count;
-	word                  symbol_table_size;
-	word                  symbol_count;
-	word                  total_data_size_dm;
-	word                  data_block_count_dm;
-	word                  total_data_size_pm;
-	word                  data_block_count_pm;
-} t_dsp_file_header;
-typedef struct tag_dsp_memory_block_desc
-{
-	word                  alias_memory_block;
-	word                  memory_type;
-	word                  address;
-	word                  size;             /* DSP words */
-} t_dsp_memory_block_desc;
-typedef struct tag_dsp_segment_desc
-{
-	word                  memory_block;
-	word                  attributes;
-	word                  base;
-	word                  size;
-	word                  alignment;        /* ==0 -> no other legal start address than base */
-} t_dsp_segment_desc;
-typedef struct tag_dsp_symbol_desc
-{
-	word                  symbol_id;
-	word                  segment;
-	word                  offset;
-	word                  size;             /* DSP words */
-} t_dsp_symbol_desc;
-typedef struct tag_dsp_data_block_header
-{
-	word                  attributes;
-	word                  segment;
-	word                  offset;
-	word                  size;             /* DSP words */
-} t_dsp_data_block_header;
-typedef struct tag_dsp_download_desc
-{
-	word                  download_id;
-	word                  download_flags;
-	word                  required_processing_power;
-	word                  interface_channel_count;
-	word                  excess_header_size;
-	word                  memory_block_count;
-	word                  segment_count;
-	word                  symbol_count;
-	word                  data_block_count_dm;
-	word                  data_block_count_pm;
-	byte *p_excess_header_data;
-	char *p_download_description;
-	t_dsp_memory_block_desc *p_memory_block_table;
-	t_dsp_segment_desc *p_segment_table;
-	t_dsp_symbol_desc *p_symbol_table;
-	word *p_data_blocks_dm;
-	word *p_data_blocks_pm;
-} t_dsp_desc;
-typedef struct tag_dsp_portable_download_desc /* be sure to keep native alignment for MAESTRA's */
-{
-	word                  download_id;
-	word                  download_flags;
-	word                  required_processing_power;
-	word                  interface_channel_count;
-	word                  excess_header_size;
-	word                  memory_block_count;
-	word                  segment_count;
-	word                  symbol_count;
-	word                  data_block_count_dm;
-	word                  data_block_count_pm;
-	dword                 p_excess_header_data;
-	dword                 p_download_description;
-	dword                 p_memory_block_table;
-	dword                 p_segment_table;
-	dword                 p_symbol_table;
-	dword                 p_data_blocks_dm;
-	dword                 p_data_blocks_pm;
-} t_dsp_portable_desc;
-#define DSP_DOWNLOAD_INDEX_KERNEL               0
-#define DSP30TX_DOWNLOAD_INDEX_KERNEL           1
-#define DSP30RX_DOWNLOAD_INDEX_KERNEL           2
-#define DSP_MAX_DOWNLOAD_COUNT                  64
-#define DSP_DOWNLOAD_MAX_SEGMENTS         16
-#define DSP_UDATA_REQUEST_RECONFIGURE     0
-/*
-  parameters:
-  <word> reconfigure delay (in 8kHz samples)
-  <word> reconfigure code
-  <byte> reconfigure hdlc preamble flags
-*/
-#define DSP_RECONFIGURE_TX_FLAG           0x8000
-#define DSP_RECONFIGURE_SHORT_TRAIN_FLAG  0x4000
-#define DSP_RECONFIGURE_ECHO_PROTECT_FLAG 0x2000
-#define DSP_RECONFIGURE_HDLC_FLAG         0x1000
-#define DSP_RECONFIGURE_SYNC_FLAG         0x0800
-#define DSP_RECONFIGURE_PROTOCOL_MASK     0x00ff
-#define DSP_RECONFIGURE_IDLE              0
-#define DSP_RECONFIGURE_V25               1
-#define DSP_RECONFIGURE_V21_CH2           2
-#define DSP_RECONFIGURE_V27_2400          3
-#define DSP_RECONFIGURE_V27_4800          4
-#define DSP_RECONFIGURE_V29_7200          5
-#define DSP_RECONFIGURE_V29_9600          6
-#define DSP_RECONFIGURE_V33_12000         7
-#define DSP_RECONFIGURE_V33_14400         8
-#define DSP_RECONFIGURE_V17_7200          9
-#define DSP_RECONFIGURE_V17_9600          10
-#define DSP_RECONFIGURE_V17_12000         11
-#define DSP_RECONFIGURE_V17_14400         12
-/*
-  data indications if transparent framer
-  <byte> data 0
-  <byte> data 1
-  ...
-  data indications if HDLC framer
-  <byte> data 0
-  <byte> data 1
-  ...
-  <byte> CRC 0
-  <byte> CRC 1
-  <byte> preamble flags
-*/
-#define DSP_UDATA_INDICATION_SYNC         0
-/*
-  returns:
-  <word> time of sync (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_DCD_OFF      1
-/*
-  returns:
-  <word> time of DCD off (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_DCD_ON       2
-/*
-  returns:
-  <word> time of DCD on (sampled from counter at 8kHz)
-  <byte> connected norm
-  <word> connected options
-  <dword> connected speed (bit/s)
-*/
-#define DSP_UDATA_INDICATION_CTS_OFF      3
-/*
-  returns:
-  <word> time of CTS off (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_CTS_ON       4
-/*
-  returns:
-  <word> time of CTS on (sampled from counter at 8kHz)
-  <byte> connected norm
-  <word> connected options
-  <dword> connected speed (bit/s)
-*/
-#define DSP_CONNECTED_NORM_UNSPECIFIED      0
-#define DSP_CONNECTED_NORM_V21              1
-#define DSP_CONNECTED_NORM_V23              2
-#define DSP_CONNECTED_NORM_V22              3
-#define DSP_CONNECTED_NORM_V22_BIS          4
-#define DSP_CONNECTED_NORM_V32_BIS          5
-#define DSP_CONNECTED_NORM_V34              6
-#define DSP_CONNECTED_NORM_V8               7
-#define DSP_CONNECTED_NORM_BELL_212A        8
-#define DSP_CONNECTED_NORM_BELL_103         9
-#define DSP_CONNECTED_NORM_V29_LEASED_LINE  10
-#define DSP_CONNECTED_NORM_V33_LEASED_LINE  11
-#define DSP_CONNECTED_NORM_TFAST            12
-#define DSP_CONNECTED_NORM_V21_CH2          13
-#define DSP_CONNECTED_NORM_V27_TER          14
-#define DSP_CONNECTED_NORM_V29              15
-#define DSP_CONNECTED_NORM_V33              16
-#define DSP_CONNECTED_NORM_V17              17
-#define DSP_CONNECTED_OPTION_TRELLIS        0x0001
-/*---------------------------------------------------------------------------*/
-extern char *dsp_read_file(OsFileHandle *fp,
-			   word card_type_number,
-			   word *p_dsp_download_count,
-			   t_dsp_desc *p_dsp_download_table,
-			   t_dsp_portable_desc *p_dsp_portable_download_table);
-/*---------------------------------------------------------------------------*/
-#endif /* DSP_DEFS_H_ */
diff --git a/drivers/isdn/hardware/eicon/dsp_tst.h b/drivers/isdn/hardware/eicon/dsp_tst.h
deleted file mode 100644
index 85edd3e..0000000
--- a/drivers/isdn/hardware/eicon/dsp_tst.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: dsp_tst.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef __DIVA_PRI_HOST_TEST_DSPS_H__
-#define __DIVA_PRI_HOST_TEST_DSPS_H__
-
-/*
-  DSP registers on maestra pri
-*/
-#define DSP1_PORT       (0x00)
-#define DSP2_PORT       (0x8)
-#define DSP3_PORT       (0x800)
-#define DSP4_PORT       (0x808)
-#define DSP5_PORT       (0x810)
-#define DSP6_PORT       (0x818)
-#define DSP7_PORT       (0x820)
-#define DSP8_PORT       (0x828)
-#define DSP9_PORT       (0x830)
-#define DSP10_PORT      (0x840)
-#define DSP11_PORT      (0x848)
-#define DSP12_PORT      (0x850)
-#define DSP13_PORT      (0x858)
-#define DSP14_PORT      (0x860)
-#define DSP15_PORT      (0x868)
-#define DSP16_PORT      (0x870)
-#define DSP17_PORT      (0x1000)
-#define DSP18_PORT      (0x1008)
-#define DSP19_PORT      (0x1010)
-#define DSP20_PORT      (0x1018)
-#define DSP21_PORT      (0x1020)
-#define DSP22_PORT      (0x1028)
-#define DSP23_PORT      (0x1030)
-#define DSP24_PORT      (0x1040)
-#define DSP25_PORT      (0x1048)
-#define DSP26_PORT      (0x1050)
-#define DSP27_PORT      (0x1058)
-#define DSP28_PORT      (0x1060)
-#define DSP29_PORT      (0x1068)
-#define DSP30_PORT      (0x1070)
-#define DSP_ADR_OFFS    0x80
-
-/*------------------------------------------------------------------
-  Dsp related definitions
-  ------------------------------------------------------------------ */
-#define DSP_SIGNATURE_PROBE_WORD 0x5a5a
-#define dsp_make_address_ex(pm, address) ((word)((pm) ? (address) : (address) + 0x4000))
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/dspdids.h b/drivers/isdn/hardware/eicon/dspdids.h
deleted file mode 100644
index 957b33c..0000000
--- a/drivers/isdn/hardware/eicon/dspdids.h
+++ /dev/null
@@ -1,75 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef DSPDIDS_H_
-#define DSPDIDS_H_
-/*---------------------------------------------------------------------------*/
-#define DSP_DID_INVALID   0
-#define DSP_DID_DIVA   1
-#define DSP_DID_DIVA_PRO  2
-#define DSP_DID_DIVA_PRO_20  3
-#define DSP_DID_DIVA_PRO_PCCARD  4
-#define DSP_DID_DIVA_SERVER_BRI_1M 5
-#define DSP_DID_DIVA_SERVER_BRI_2M 6
-#define DSP_DID_DIVA_SERVER_PRI_2M_TX 7
-#define DSP_DID_DIVA_SERVER_PRI_2M_RX 8
-#define DSP_DID_DIVA_SERVER_PRI_30M 9
-#define DSP_DID_TASK_HSCX  100
-#define DSP_DID_TASK_HSCX_PRI_2M_TX 101
-#define DSP_DID_TASK_HSCX_PRI_2M_RX 102
-#define DSP_DID_TASK_V110KRNL  200
-#define DSP_DID_OVERLAY_V1100  201
-#define DSP_DID_OVERLAY_V1101  202
-#define DSP_DID_OVERLAY_V1102  203
-#define DSP_DID_OVERLAY_V1103  204
-#define DSP_DID_OVERLAY_V1104  205
-#define DSP_DID_OVERLAY_V1105  206
-#define DSP_DID_OVERLAY_V1106  207
-#define DSP_DID_OVERLAY_V1107  208
-#define DSP_DID_OVERLAY_V1108  209
-#define DSP_DID_OVERLAY_V1109  210
-#define DSP_DID_TASK_V110_PRI_2M_TX 220
-#define DSP_DID_TASK_V110_PRI_2M_RX 221
-#define DSP_DID_TASK_MODEM  300
-#define DSP_DID_TASK_FAX05  400
-#define DSP_DID_TASK_VOICE  500
-#define DSP_DID_TASK_TIKRNL81  600
-#define DSP_DID_OVERLAY_DIAL  601
-#define DSP_DID_OVERLAY_V22  602
-#define DSP_DID_OVERLAY_V32  603
-#define DSP_DID_OVERLAY_FSK  604
-#define DSP_DID_OVERLAY_FAX  605
-#define DSP_DID_OVERLAY_VXX  606
-#define DSP_DID_OVERLAY_V8  607
-#define DSP_DID_OVERLAY_INFO  608
-#define DSP_DID_OVERLAY_V34  609
-#define DSP_DID_OVERLAY_DFX  610
-#define DSP_DID_PARTIAL_OVERLAY_DIAL 611
-#define DSP_DID_PARTIAL_OVERLAY_FSK 612
-#define DSP_DID_PARTIAL_OVERLAY_FAX 613
-#define DSP_DID_TASK_TIKRNL05  700
-/*---------------------------------------------------------------------------*/
-#endif
-/*---------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/dsrv4bri.h b/drivers/isdn/hardware/eicon/dsrv4bri.h
deleted file mode 100644
index f353fb6..0000000
--- a/drivers/isdn/hardware/eicon/dsrv4bri.h
+++ /dev/null
@@ -1,40 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_4_BRI_INC__
-#define __DIVA_XDI_DSRV_4_BRI_INC__
-/*
- * Some special registers in the PLX 9054
- */
-#define PLX9054_P2LDBELL    0x60
-#define PLX9054_L2PDBELL    0x64
-#define PLX9054_INTCSR      0x69
-#define PLX9054_INT_ENABLE  0x09
-#define PLX9054_SOFT_RESET 0x4000
-#define PLX9054_RELOAD_EEPROM 0x2000
-#define DIVA_4BRI_REVISION(__x__) (((__x__)->cardType == CARDTYPE_DIVASRV_Q_8M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_B_2M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_B_2F_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI))
-void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter);
-void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsrv_bri.h b/drivers/isdn/hardware/eicon/dsrv_bri.h
deleted file mode 100644
index 8a67dbc..0000000
--- a/drivers/isdn/hardware/eicon/dsrv_bri.h
+++ /dev/null
@@ -1,37 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_BRI_INC__
-#define __DIVA_XDI_DSRV_BRI_INC__
-/*
-  Functions exported from os dependent part of
-  BRI card configuration and used in
-  OS independent part
-*/
-/*
-  Prepare OS dependent part of BRI functions
-*/
-void diva_os_prepare_maestra_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsrv_pri.h b/drivers/isdn/hardware/eicon/dsrv_pri.h
deleted file mode 100644
index fd1a9ff..0000000
--- a/drivers/isdn/hardware/eicon/dsrv_pri.h
+++ /dev/null
@@ -1,38 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_PRI_INC__
-#define __DIVA_XDI_DSRV_PRI_INC__
-/*
-  Functions exported from os dependent part of
-  PRI card configuration and used in
-  OS independent part
-*/
-/*
-  Prepare OS dependent part of PRI/PRI Rev.2 functions
-*/
-void diva_os_prepare_pri_functions(PISDN_ADAPTER IoAdapter);
-void diva_os_prepare_pri2_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/entity.h b/drivers/isdn/hardware/eicon/entity.h
deleted file mode 100644
index f9767d3..0000000
--- a/drivers/isdn/hardware/eicon/entity.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: entity.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVAS_USER_MODE_IDI_ENTITY__
-#define __DIVAS_USER_MODE_IDI_ENTITY__
-
-#define DIVA_UM_IDI_RC_PENDING      0x00000001
-#define DIVA_UM_IDI_REMOVE_PENDING  0x00000002
-#define DIVA_UM_IDI_TX_FLOW_CONTROL 0x00000004
-#define DIVA_UM_IDI_REMOVED         0x00000008
-#define DIVA_UM_IDI_ASSIGN_PENDING  0x00000010
-
-typedef struct _divas_um_idi_entity {
-	struct list_head          link;
-	diva_um_idi_adapter_t *adapter; /* Back to adapter */
-	ENTITY e;
-	void *os_ref;
-	dword status;
-	void *os_context;
-	int rc_count;
-	diva_um_idi_data_queue_t  data; /* defined by user 1 ... MAX */
-	diva_um_idi_data_queue_t  rc;   /* two entries */
-	BUFFERS                   XData;
-	BUFFERS                   RData;
-	byte                      buffer[2048 + 512];
-} divas_um_idi_entity_t;
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/helpers.h b/drivers/isdn/hardware/eicon/helpers.h
deleted file mode 100644
index c9156b0..0000000
--- a/drivers/isdn/hardware/eicon/helpers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_CARD_CONFIG_HELPERS_INC__
-#define __DIVA_XDI_CARD_CONFIG_HELPERS_INC__
-dword diva_get_protocol_file_features(byte *File,
-				      int offset,
-				      char *IdStringBuffer,
-				      dword IdBufferSize);
-void diva_configure_protocol(PISDN_ADAPTER IoAdapter);
-/*
-  Low level file access system abstraction
-*/
-/* -------------------------------------------------------------------------
-   Access to single file
-   Return pointer to the image of the requested file,
-   write image length to 'FileLength'
-   ------------------------------------------------------------------------- */
-void *xdiLoadFile(char *FileName, dword *FileLength, unsigned long MaxLoadSize);
-/* -------------------------------------------------------------------------
-   Depending on the protocol settings, returns a pointer
-   to the image of the appropriate protocol file
-   ------------------------------------------------------------------------- */
-void *xdiLoadArchive(PISDN_ADAPTER IoAdapter, dword *FileLength, unsigned long MaxLoadSize);
-/* --------------------------------------------------------------------------
-   Free all system resources accessed by xdiLoadFile and xdiLoadArchive
-   -------------------------------------------------------------------------- */
-void xdiFreeFile(void *handle);
-#endif
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
deleted file mode 100644
index fef6586..0000000
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/* $Id: idifunc.c,v 1.14.4.4 2004/08/28 20:03:53 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern char *DRIVERRELEASE_IDI;
-
-extern void DIVA_DIDD_Read(void *, int);
-extern int diva_user_mode_idi_create_adapter(const DESCRIPTOR *, int);
-extern void diva_user_mode_idi_remove_adapter(int);
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-
-static void no_printf(unsigned char *x, ...)
-{
-	/* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- * stop debug
- */
-static void stop_dbg(void)
-{
-	DbgDeregister();
-	memset(&MAdapter, 0, sizeof(MAdapter));
-	dprintf = no_printf;
-}
-
-typedef struct _udiva_card {
-	struct list_head list;
-	int Id;
-	DESCRIPTOR d;
-} udiva_card;
-
-static LIST_HEAD(cards);
-static diva_os_spin_lock_t ll_lock;
-
-/*
- * find card in list
- */
-static udiva_card *find_card_in_list(DESCRIPTOR *d)
-{
-	udiva_card *card;
-	struct list_head *tmp;
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&ll_lock, &old_irql, "find card");
-	list_for_each(tmp, &cards) {
-		card = list_entry(tmp, udiva_card, list);
-		if (card->d.request == d->request) {
-			diva_os_leave_spin_lock(&ll_lock, &old_irql,
-						"find card");
-			return (card);
-		}
-	}
-	diva_os_leave_spin_lock(&ll_lock, &old_irql, "find card");
-	return ((udiva_card *) NULL);
-}
-
-/*
- * new card
- */
-static void um_new_card(DESCRIPTOR *d)
-{
-	int adapter_nr = 0;
-	udiva_card *card = NULL;
-	IDI_SYNC_REQ sync_req;
-	diva_os_spin_lock_magic_t old_irql;
-
-	if (!(card = diva_os_malloc(0, sizeof(udiva_card)))) {
-		DBG_ERR(("cannot get buffer for card"));
-		return;
-	}
-	memcpy(&card->d, d, sizeof(DESCRIPTOR));
-	sync_req.xdi_logical_adapter_number.Req = 0;
-	sync_req.xdi_logical_adapter_number.Rc =
-		IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER;
-	card->d.request((ENTITY *)&sync_req);
-	adapter_nr =
-		sync_req.xdi_logical_adapter_number.info.logical_adapter_number;
-	card->Id = adapter_nr;
-	if (!(diva_user_mode_idi_create_adapter(d, adapter_nr))) {
-		diva_os_enter_spin_lock(&ll_lock, &old_irql, "add card");
-		list_add_tail(&card->list, &cards);
-		diva_os_leave_spin_lock(&ll_lock, &old_irql, "add card");
-	} else {
-		DBG_ERR(("could not create user mode idi card %d",
-			 adapter_nr));
-		diva_os_free(0, card);
-	}
-}
-
-/*
- * remove card
- */
-static void um_remove_card(DESCRIPTOR *d)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	udiva_card *card = NULL;
-
-	if (!(card = find_card_in_list(d))) {
-		DBG_ERR(("cannot find card to remove"));
-		return;
-	}
-	diva_user_mode_idi_remove_adapter(card->Id);
-	diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove card");
-	list_del(&card->list);
-	diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove card");
-	DBG_LOG(("idi proc entry removed for card %d", card->Id));
-	diva_os_free(0, card);
-}
-
-/*
- * remove all adapters
- */
-static void __exit remove_all_idi_proc(void)
-{
-	udiva_card *card;
-	diva_os_spin_lock_magic_t old_irql;
-
-rescan:
-	diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove all");
-	if (!list_empty(&cards)) {
-		card = list_entry(cards.next, udiva_card, list);
-		list_del(&card->list);
-		diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all");
-		diva_user_mode_idi_remove_adapter(card->Id);
-		diva_os_free(0, card);
-		goto rescan;
-	}
-	diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all");
-}
-
-/*
- * DIDD notify callback
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
-			   int removal)
-{
-	if (adapter->type == IDI_DADAPTER) {
-		DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
-		return (NULL);
-	} else if (adapter->type == IDI_DIMAINT) {
-		if (removal) {
-			stop_dbg();
-		} else {
-			memcpy(&MAdapter, adapter, sizeof(MAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
-		}
-	} else if ((adapter->type > 0) && (adapter->type < 16)) {	/* IDI Adapter */
-		if (removal) {
-			um_remove_card(adapter);
-		} else {
-			um_new_card(adapter);
-		}
-	}
-	return (NULL);
-}
-
-/*
- * connect DIDD
- */
-static int __init connect_didd(void)
-{
-	int x = 0;
-	int dadapter = 0;
-	IDI_SYNC_REQ req;
-	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
-	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
-			dadapter = 1;
-			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			req.didd_notify.e.Req = 0;
-			req.didd_notify.e.Rc =
-				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
-			req.didd_notify.info.callback = (void *)didd_callback;
-			req.didd_notify.info.context = NULL;
-			DAdapter.request((ENTITY *)&req);
-			if (req.didd_notify.e.Rc != 0xff) {
-				stop_dbg();
-				return (0);
-			}
-			notify_handle = req.didd_notify.info.handle;
-		} else if (DIDD_Table[x].type == IDI_DIMAINT) {	/* MAINT found */
-			memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
-		} else if ((DIDD_Table[x].type > 0)
-			   && (DIDD_Table[x].type < 16)) {	/* IDI Adapter found */
-			um_new_card(&DIDD_Table[x]);
-		}
-	}
-
-	if (!dadapter) {
-		stop_dbg();
-	}
-
-	return (dadapter);
-}
-
-/*
- *  Disconnect from DIDD
- */
-static void __exit disconnect_didd(void)
-{
-	IDI_SYNC_REQ req;
-
-	stop_dbg();
-
-	req.didd_notify.e.Req = 0;
-	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
-	req.didd_notify.info.handle = notify_handle;
-	DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init idifunc_init(void)
-{
-	diva_os_initialize_spin_lock(&ll_lock, "idifunc");
-
-	if (diva_user_mode_idi_init()) {
-		DBG_ERR(("init: init failed."));
-		return (0);
-	}
-
-	if (!connect_didd()) {
-		diva_user_mode_idi_finit();
-		DBG_ERR(("init: failed to connect to DIDD."));
-		return (0);
-	}
-	return (1);
-}
-
-/*
- * finit
- */
-void __exit idifunc_finit(void)
-{
-	diva_user_mode_idi_finit();
-	disconnect_didd();
-	remove_all_idi_proc();
-}
diff --git a/drivers/isdn/hardware/eicon/io.c b/drivers/isdn/hardware/eicon/io.c
deleted file mode 100644
index 8851ce5..0000000
--- a/drivers/isdn/hardware/eicon/io.c
+++ /dev/null
@@ -1,852 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "divasync.h"
-#define MIPS_SCOM
-#include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
-#include "di.h"
-#include "mi_pc.h"
-#include "io.h"
-extern ADAPTER *adapter[MAX_ADAPTER];
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-void request(PISDN_ADAPTER, ENTITY *);
-static void pcm_req(PISDN_ADAPTER, ENTITY *);
-/* --------------------------------------------------------------------------
-   local functions
-   -------------------------------------------------------------------------- */
-#define ReqFunc(N)							\
-	static void Request##N(ENTITY *e)				\
-	{ if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
-ReqFunc(0)
-ReqFunc(1)
-ReqFunc(2)
-ReqFunc(3)
-ReqFunc(4)
-ReqFunc(5)
-ReqFunc(6)
-ReqFunc(7)
-ReqFunc(8)
-ReqFunc(9)
-ReqFunc(10)
-ReqFunc(11)
-ReqFunc(12)
-ReqFunc(13)
-ReqFunc(14)
-ReqFunc(15)
-IDI_CALL Requests[MAX_ADAPTER] =
-{ &Request0, &Request1, &Request2, &Request3,
-  &Request4, &Request5, &Request6, &Request7,
-  &Request8, &Request9, &Request10, &Request11,
-  &Request12, &Request13, &Request14, &Request15
-};
-/*****************************************************************************/
-/*
-  This array should indicate all new services that this version of XDI
-  is able to provide to its clients
-*/
-static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ + 1] = {
-	(DIVA_XDI_EXTENDED_FEATURES_VALID       |
-	 DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR    |
-	 DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS    |
-#if defined(DIVA_IDI_RX_DMA)
-	 DIVA_XDI_EXTENDED_FEATURE_CMA          |
-	 DIVA_XDI_EXTENDED_FEATURE_RX_DMA       |
-	 DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
-#endif
-	 DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
-	0
-};
-/*****************************************************************************/
-void
-dump_xlog_buffer(PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
-{
-	dword   logLen;
-	word *Xlog   = xlogDesc->buf;
-	word  logCnt = xlogDesc->cnt;
-	word  logOut = xlogDesc->out / sizeof(*Xlog);
-	DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
-		 &IoAdapter->Name[0], (int)logCnt))
-		DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
-		for (; logCnt > 0; --logCnt)
-		{
-			if (!GET_WORD(&Xlog[logOut]))
-			{
-				if (--logCnt == 0)
-					break;
-				logOut = 0;
-			}
-			if (GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)))
-			{
-				if (logCnt > 2)
-				{
-					DBG_FTL(("Possibly corrupted XLOG: %d entries left",
-						 (int)logCnt))
-						}
-				break;
-			}
-			logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog)));
-			DBG_FTL_MXLOG(((char *)&Xlog[logOut + 1], (dword)(logLen - 2)))
-				logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog);
-		}
-	DBG_FTL(("%s: ***************** end of XLOG *****************",
-		 &IoAdapter->Name[0]))
-		}
-/*****************************************************************************/
-#if defined(XDI_USE_XLOG)
-static char *(ExceptionCauseTable[]) =
-{
-	"Interrupt",
-	"TLB mod /IBOUND",
-	"TLB load /DBOUND",
-	"TLB store",
-	"Address error load",
-	"Address error store",
-	"Instruction load bus error",
-	"Data load/store bus error",
-	"Syscall",
-	"Breakpoint",
-	"Reverd instruction",
-	"Coprocessor unusable",
-	"Overflow",
-	"TRAP",
-	"VCEI",
-	"Floating Point Exception",
-	"CP2",
-	"Reserved 17",
-	"Reserved 18",
-	"Reserved 19",
-	"Reserved 20",
-	"Reserved 21",
-	"Reserved 22",
-	"WATCH",
-	"Reserved 24",
-	"Reserved 25",
-	"Reserved 26",
-	"Reserved 27",
-	"Reserved 28",
-	"Reserved 29",
-	"Reserved 30",
-	"VCED"
-};
-#endif
-void
-dump_trap_frame(PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame)
-{
-	MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame;
-	dword    __iomem *regs;
-	regs  = &xcept->regs[0];
-	DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
-		 &IoAdapter->Name[0]))
-		DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
-		DBG_FTL(("Cause: %s",
-			 ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
-		DBG_FTL(("sr    0x%08x cr    0x%08x epc   0x%08x vaddr 0x%08x",
-			 READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
-			 READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
-		DBG_FTL(("zero  0x%08x at    0x%08x v0    0x%08x v1    0x%08x",
-			 READ_DWORD(&regs[0]), READ_DWORD(&regs[1]),
-			 READ_DWORD(&regs[2]), READ_DWORD(&regs[3])))
-		DBG_FTL(("a0    0x%08x a1    0x%08x a2    0x%08x a3    0x%08x",
-			 READ_DWORD(&regs[4]), READ_DWORD(&regs[5]),
-			 READ_DWORD(&regs[6]), READ_DWORD(&regs[7])))
-		DBG_FTL(("t0    0x%08x t1    0x%08x t2    0x%08x t3    0x%08x",
-			 READ_DWORD(&regs[8]), READ_DWORD(&regs[9]),
-			 READ_DWORD(&regs[10]), READ_DWORD(&regs[11])))
-		DBG_FTL(("t4    0x%08x t5    0x%08x t6    0x%08x t7    0x%08x",
-			 READ_DWORD(&regs[12]), READ_DWORD(&regs[13]),
-			 READ_DWORD(&regs[14]), READ_DWORD(&regs[15])))
-		DBG_FTL(("s0    0x%08x s1    0x%08x s2    0x%08x s3    0x%08x",
-			 READ_DWORD(&regs[16]), READ_DWORD(&regs[17]),
-			 READ_DWORD(&regs[18]), READ_DWORD(&regs[19])))
-		DBG_FTL(("s4    0x%08x s5    0x%08x s6    0x%08x s7    0x%08x",
-			 READ_DWORD(&regs[20]), READ_DWORD(&regs[21]),
-			 READ_DWORD(&regs[22]), READ_DWORD(&regs[23])))
-		DBG_FTL(("t8    0x%08x t9    0x%08x k0    0x%08x k1    0x%08x",
-			 READ_DWORD(&regs[24]), READ_DWORD(&regs[25]),
-			 READ_DWORD(&regs[26]), READ_DWORD(&regs[27])))
-		DBG_FTL(("gp    0x%08x sp    0x%08x s8    0x%08x ra    0x%08x",
-			 READ_DWORD(&regs[28]), READ_DWORD(&regs[29]),
-			 READ_DWORD(&regs[30]), READ_DWORD(&regs[31])))
-		DBG_FTL(("md    0x%08x|%08x         resvd 0x%08x class 0x%08x",
-			 READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
-			 READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
-		}
-/* --------------------------------------------------------------------------
-   Real XDI Request function
-   -------------------------------------------------------------------------- */
-void request(PISDN_ADAPTER IoAdapter, ENTITY *e)
-{
-	byte i;
-	diva_os_spin_lock_magic_t irql;
-/*
- * if the Req field in the entity structure is 0,
- * we treat this request as a special function call
- */
-	if (!e->Req)
-	{
-		IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
-		switch (e->Rc)
-		{
-#if defined(DIVA_IDI_RX_DMA)
-		case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
-			diva_xdi_dma_descriptor_operation_t *pI = \
-				&syncReq->xdi_dma_descriptor_operation.info;
-			if (!IoAdapter->dma_map) {
-				pI->operation         = -1;
-				pI->descriptor_number = -1;
-				return;
-			}
-			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
-			if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
-				pI->descriptor_number = diva_alloc_dma_map_entry(\
-					(struct _diva_dma_map_entry *)IoAdapter->dma_map);
-				if (pI->descriptor_number >= 0) {
-					dword dma_magic;
-					void *local_addr;
-					diva_get_dma_map_entry(\
-						(struct _diva_dma_map_entry *)IoAdapter->dma_map,
-						pI->descriptor_number,
-						&local_addr, &dma_magic);
-					pI->descriptor_address  = local_addr;
-					pI->descriptor_magic    = dma_magic;
-					pI->operation           = 0;
-				} else {
-					pI->operation           = -1;
-				}
-			} else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
-				   (pI->descriptor_number >= 0)) {
-				diva_free_dma_map_entry((struct _diva_dma_map_entry *)IoAdapter->dma_map,
-							pI->descriptor_number);
-				pI->descriptor_number = -1;
-				pI->operation         = 0;
-			} else {
-				pI->descriptor_number = -1;
-				pI->operation         = -1;
-			}
-			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
-		} return;
-#endif
-		case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
-			diva_xdi_get_logical_adapter_number_s_t *pI = \
-				&syncReq->xdi_logical_adapter_number.info;
-			pI->logical_adapter_number = IoAdapter->ANum;
-			pI->controller = IoAdapter->ControllerNumber;
-			pI->total_controllers = IoAdapter->Properties.Adapters;
-		} return;
-		case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
-			diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
-			memset(&prms, 0x00, sizeof(prms));
-			prms.structure_length = min_t(size_t, sizeof(prms), pI->structure_length);
-			memset(pI, 0x00, pI->structure_length);
-			prms.flag_dynamic_l1_down    = (IoAdapter->capi_cfg.cfg_1 & \
-							DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
-			prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
-							   DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
-			memcpy(pI, &prms, prms.structure_length);
-		} return;
-		case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
-			syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
-			return;
-		case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
-			dword i;
-			diva_xdi_get_extended_xdi_features_t *pI =\
-				&syncReq->xdi_extended_features.info;
-			pI->buffer_length_in_bytes &= ~0x80000000;
-			if (pI->buffer_length_in_bytes && pI->features) {
-				memset(pI->features, 0x00, pI->buffer_length_in_bytes);
-			}
-			for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
-				     (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
-				pI->features[i] = extended_xdi_features[i];
-			}
-			if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
-			    (!pI->features)) {
-				pI->buffer_length_in_bytes =\
-					(0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
-			}
-		} return;
-		case IDI_SYNC_REQ_XDI_GET_STREAM:
-			if (IoAdapter) {
-				diva_xdi_provide_istream_info(&IoAdapter->a,
-							      &syncReq->xdi_stream_info.info);
-			} else {
-				syncReq->xdi_stream_info.info.provided_service = 0;
-			}
-			return;
-		case IDI_SYNC_REQ_GET_NAME:
-			if (IoAdapter)
-			{
-				strcpy(&syncReq->GetName.name[0], IoAdapter->Name);
-				DBG_TRC(("xdi: Adapter %d / Name '%s'",
-					 IoAdapter->ANum, IoAdapter->Name))
-					return;
-			}
-			syncReq->GetName.name[0] = '\0';
-			break;
-		case IDI_SYNC_REQ_GET_SERIAL:
-			if (IoAdapter)
-			{
-				syncReq->GetSerial.serial = IoAdapter->serialNo;
-				DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
-					 IoAdapter->ANum, IoAdapter->serialNo))
-					return;
-			}
-			syncReq->GetSerial.serial = 0;
-			break;
-		case IDI_SYNC_REQ_GET_CARDTYPE:
-			if (IoAdapter)
-			{
-				syncReq->GetCardType.cardtype = IoAdapter->cardType;
-				DBG_TRC(("xdi: Adapter %d / CardType %ld",
-					 IoAdapter->ANum, IoAdapter->cardType))
-					return;
-			}
-			syncReq->GetCardType.cardtype = 0;
-			break;
-		case IDI_SYNC_REQ_GET_XLOG:
-			if (IoAdapter)
-			{
-				pcm_req(IoAdapter, e);
-				return;
-			}
-			e->Ind = 0;
-			break;
-		case IDI_SYNC_REQ_GET_DBG_XLOG:
-			if (IoAdapter)
-			{
-				pcm_req(IoAdapter, e);
-				return;
-			}
-			e->Ind = 0;
-			break;
-		case IDI_SYNC_REQ_GET_FEATURES:
-			if (IoAdapter)
-			{
-				syncReq->GetFeatures.features =
-					(unsigned short)IoAdapter->features;
-				return;
-			}
-			syncReq->GetFeatures.features = 0;
-			break;
-		case IDI_SYNC_REQ_PORTDRV_HOOK:
-			if (IoAdapter)
-			{
-				DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
-					return;
-			}
-			break;
-		}
-		if (IoAdapter)
-		{
-			return;
-		}
-	}
-	DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
-		if (!IoAdapter)
-		{
-			DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
-				return;
-		}
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-/*
- * assign an entity
- */
-	if (!(e->Id & 0x1f))
-	{
-		if (IoAdapter->e_count >= IoAdapter->e_max)
-		{
-			DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
-				 IoAdapter->e_max))
-				diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-			return;
-		}
-/*
- * find a new free id
- */
-		for (i = 1; IoAdapter->e_tbl[i].e; ++i);
-		IoAdapter->e_tbl[i].e = e;
-		IoAdapter->e_count++;
-		e->No = (byte)i;
-		e->More = 0;
-		e->RCurrent = 0xff;
-	}
-	else
-	{
-		i = e->No;
-	}
-/*
- * if the entity is still busy, ignore the request call
- */
-	if (e->More & XBUSY)
-	{
-		DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
-			if (!IoAdapter->trapped && IoAdapter->trapFnc)
-			{
-				IoAdapter->trapFnc(IoAdapter);
-				/*
-				  First trap, also notify user if supported
-				*/
-				if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
-					(*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
-				}
-			}
-		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-		return;
-	}
-/*
- * initialize transmit status variables
- */
-	e->More |= XBUSY;
-	e->More &= ~XMOREF;
-	e->XCurrent = 0;
-	e->XOffset = 0;
-/*
- * queue this entity in the adapter request queue
- */
-	IoAdapter->e_tbl[i].next = 0;
-	if (IoAdapter->head)
-	{
-		IoAdapter->e_tbl[IoAdapter->tail].next = i;
-		IoAdapter->tail = i;
-	}
-	else
-	{
-		IoAdapter->head = i;
-		IoAdapter->tail = i;
-	}
-/*
- * queue the DPC to process the request
- */
-	diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-}
-/* ---------------------------------------------------------------------
-   Main DPC routine
-   --------------------------------------------------------------------- */
-void DIDpcRoutine(struct _diva_os_soft_isr *psoft_isr, void *Context) {
-	PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context;
-	ADAPTER *a = &IoAdapter->a;
-	diva_os_atomic_t *pin_dpc = &IoAdapter->in_dpc;
-	if (diva_os_atomic_increment(pin_dpc) == 1) {
-		do {
-			if (IoAdapter->tst_irq(a))
-			{
-				if (!IoAdapter->Unavailable)
-					IoAdapter->dpc(a);
-				IoAdapter->clr_irq(a);
-			}
-			IoAdapter->out(a);
-		} while (diva_os_atomic_decrement(pin_dpc) > 0);
-		/* ----------------------------------------------------------------
-		   Look for XLOG request (cards with indirect addressing)
-		   ---------------------------------------------------------------- */
-		if (IoAdapter->pcm_pending) {
-			struct pc_maint *pcm;
-			diva_os_spin_lock_magic_t OldIrql;
-			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-						&OldIrql,
-						"data_dpc");
-			pcm = (struct pc_maint *)IoAdapter->pcm_data;
-			switch (IoAdapter->pcm_pending) {
-			case 1: /* ask card for XLOG */
-				a->ram_out(a, &IoAdapter->pcm->rc, 0);
-				a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
-				IoAdapter->pcm_pending = 2;
-				break;
-			case 2: /* Try to get XLOG from the card */
-				if ((int)(a->ram_in(a, &IoAdapter->pcm->rc))) {
-					a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
-					IoAdapter->pcm_pending = 3;
-				}
-				break;
-			case 3: /* let XDI recover XLOG */
-				break;
-			}
-			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-						&OldIrql,
-						"data_dpc");
-		}
-		/* ---------------------------------------------------------------- */
-	}
-}
-/* --------------------------------------------------------------------------
-   XLOG interface
-   -------------------------------------------------------------------------- */
-static void
-pcm_req(PISDN_ADAPTER IoAdapter, ENTITY *e)
-{
-	diva_os_spin_lock_magic_t OldIrql;
-	int              i, rc;
-	ADAPTER         *a = &IoAdapter->a;
-	struct pc_maint *pcm = (struct pc_maint *)&e->Ind;
-/*
- * special handling of I/O based card interface
- * the memory access isn't an atomic operation !
- */
-	if (IoAdapter->Properties.Card == CARD_MAE)
-	{
-		diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-					&OldIrql,
-					"data_pcm_1");
-		IoAdapter->pcm_data = (void *)pcm;
-		IoAdapter->pcm_pending = 1;
-		diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
-		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-					&OldIrql,
-					"data_pcm_1");
-		for (rc = 0, i = (IoAdapter->trapped ? 3000 : 250); !rc && (i > 0); --i)
-		{
-			diva_os_sleep(1);
-			if (IoAdapter->pcm_pending == 3) {
-				diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-							&OldIrql,
-							"data_pcm_3");
-				IoAdapter->pcm_pending = 0;
-				IoAdapter->pcm_data    = NULL;
-				diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-							&OldIrql,
-							"data_pcm_3");
-				return;
-			}
-			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-						&OldIrql,
-						"data_pcm_2");
-			diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
-			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-						&OldIrql,
-						"data_pcm_2");
-		}
-		diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-					&OldIrql,
-					"data_pcm_4");
-		IoAdapter->pcm_pending = 0;
-		IoAdapter->pcm_data    = NULL;
-		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-					&OldIrql,
-					"data_pcm_4");
-		goto Trapped;
-	}
-/*
- * memory based shared ram is accessible from different
- * processors without disturbing concurrent processes.
- */
-	a->ram_out(a, &IoAdapter->pcm->rc, 0);
-	a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
-	for (i = (IoAdapter->trapped ? 3000 : 250); --i > 0;)
-	{
-		diva_os_sleep(1);
-		rc = (int)(a->ram_in(a, &IoAdapter->pcm->rc));
-		if (rc)
-		{
-			a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
-			return;
-		}
-	}
-Trapped:
-	if (IoAdapter->trapFnc)
-	{
-		int trapped = IoAdapter->trapped;
-		IoAdapter->trapFnc(IoAdapter);
-		/*
-		  First trap, also notify user if supported
-		*/
-		if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
-			(*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
-		}
-	}
-}
-/*------------------------------------------------------------------*/
-/* ram access functions for memory mapped cards                     */
-/*------------------------------------------------------------------*/
-byte mem_in(ADAPTER *a, void *addr)
-{
-	byte val;
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	val = READ_BYTE(Base + (unsigned long)addr);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-	return (val);
-}
-word mem_inw(ADAPTER *a, void *addr)
-{
-	word val;
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	val = READ_WORD((Base + (unsigned long)addr));
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-	return (val);
-}
-void mem_in_dw(ADAPTER *a, void *addr, dword *data, int dwords)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	while (dwords--) {
-		*data++ = READ_DWORD((Base + (unsigned long)addr));
-		addr += 4;
-	}
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_in_buffer(ADAPTER *a, void *addr, void *buffer, word length)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	memcpy_fromio(buffer, (Base + (unsigned long)addr), length);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
-{
-	PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
-	IoAdapter->RBuffer.length = mem_inw(a, &RBuffer->length);
-	mem_in_buffer(a, RBuffer->P, IoAdapter->RBuffer.P,
-		      IoAdapter->RBuffer.length);
-	e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
-}
-void mem_out(ADAPTER *a, void *addr, byte data)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	WRITE_BYTE(Base + (unsigned long)addr, data);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_outw(ADAPTER *a, void *addr, word data)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	WRITE_WORD((Base + (unsigned long)addr), data);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_out_dw(ADAPTER *a, void *addr, const dword *data, int dwords)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	while (dwords--) {
-		WRITE_DWORD((Base + (unsigned long)addr), *data);
-		addr += 4;
-		data++;
-	}
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_out_buffer(ADAPTER *a, void *addr, void *buffer, word length)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	memcpy_toio((Base + (unsigned long)addr), buffer, length);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_inc(ADAPTER *a, void *addr)
-{
-	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
-	byte  x = READ_BYTE(Base + (unsigned long)addr);
-	WRITE_BYTE(Base + (unsigned long)addr, x + 1);
-	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-/*------------------------------------------------------------------*/
-/* ram access functions for io-mapped cards                         */
-/*------------------------------------------------------------------*/
-byte io_in(ADAPTER *a, void *adr)
-{
-	byte val;
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	val = inpp(Port);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-	return (val);
-}
-word io_inw(ADAPTER *a, void *adr)
-{
-	word val;
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	val = inppw(Port);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-	return (val);
-}
-void io_in_buffer(ADAPTER *a, void *adr, void *buffer, word len)
-{
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	byte *P = (byte *)buffer;
-	if ((long)adr & 1) {
-		outppw(Port + 4, (word)(unsigned long)adr);
-		*P = inpp(Port);
-		P++;
-		adr = ((byte *) adr) + 1;
-		len--;
-		if (!len) {
-			DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-			return;
-		}
-	}
-	outppw(Port + 4, (word)(unsigned long)adr);
-	inppw_buffer(Port, P, len + 1);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
-{
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)RBuffer);
-	((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
-	inppw_buffer(Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
-	e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_out(ADAPTER *a, void *adr, byte data)
-{
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	outpp(Port, data);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_outw(ADAPTER *a, void *adr, word data)
-{
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	outppw(Port, data);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_out_buffer(ADAPTER *a, void *adr, void *buffer, word len)
-{
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	byte *P = (byte *)buffer;
-	if ((long)adr & 1) {
-		outppw(Port + 4, (word)(unsigned long)adr);
-		outpp(Port, *P);
-		P++;
-		adr = ((byte *) adr) + 1;
-		len--;
-		if (!len) {
-			DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-			return;
-		}
-	}
-	outppw(Port + 4, (word)(unsigned long)adr);
-	outppw_buffer(Port, P, len + 1);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_inc(ADAPTER *a, void *adr)
-{
-	byte x;
-	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	x = inpp(Port);
-	outppw(Port + 4, (word)(unsigned long)adr);
-	outpp(Port, x + 1);
-	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-/*------------------------------------------------------------------*/
-/* OS specific functions related to queuing of entities             */
-/*------------------------------------------------------------------*/
-void free_entity(ADAPTER *a, byte e_no)
-{
-	PISDN_ADAPTER IoAdapter;
-	diva_os_spin_lock_magic_t irql;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
-	IoAdapter->e_tbl[e_no].e = NULL;
-	IoAdapter->e_count--;
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
-}
-void assign_queue(ADAPTER *a, byte e_no, word ref)
-{
-	PISDN_ADAPTER IoAdapter;
-	diva_os_spin_lock_magic_t irql;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
-	IoAdapter->e_tbl[e_no].assign_ref = ref;
-	IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
-	IoAdapter->assign = e_no;
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
-}
-byte get_assign(ADAPTER *a, word ref)
-{
-	PISDN_ADAPTER IoAdapter;
-	diva_os_spin_lock_magic_t irql;
-	byte e_no;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
-				&irql,
-				"data_assign_get");
-	for (e_no = (byte)IoAdapter->assign;
-	    e_no && IoAdapter->e_tbl[e_no].assign_ref != ref;
-	    e_no = IoAdapter->e_tbl[e_no].next);
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
-				&irql,
-				"data_assign_get");
-	return e_no;
-}
-void req_queue(ADAPTER *a, byte e_no)
-{
-	PISDN_ADAPTER IoAdapter;
-	diva_os_spin_lock_magic_t irql;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
-	IoAdapter->e_tbl[e_no].next = 0;
-	if (IoAdapter->head) {
-		IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
-		IoAdapter->tail = e_no;
-	}
-	else {
-		IoAdapter->head = e_no;
-		IoAdapter->tail = e_no;
-	}
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
-}
-byte look_req(ADAPTER *a)
-{
-	PISDN_ADAPTER IoAdapter;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	return ((byte)IoAdapter->head);
-}
-void next_req(ADAPTER *a)
-{
-	PISDN_ADAPTER IoAdapter;
-	diva_os_spin_lock_magic_t irql;
-	IoAdapter = (PISDN_ADAPTER) a->io;
-	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
-	IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
-	if (!IoAdapter->head) IoAdapter->tail = 0;
-	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
-}
-/*------------------------------------------------------------------*/
-/* memory map functions                                             */
-/*------------------------------------------------------------------*/
-ENTITY *entity_ptr(ADAPTER *a, byte e_no)
-{
-	PISDN_ADAPTER IoAdapter;
-	IoAdapter = (PISDN_ADAPTER)a->io;
-	return (IoAdapter->e_tbl[e_no].e);
-}
-void *PTR_X(ADAPTER *a, ENTITY *e)
-{
-	return ((void *) e->X);
-}
-void *PTR_R(ADAPTER *a, ENTITY *e)
-{
-	return ((void *) e->R);
-}
-void *PTR_P(ADAPTER *a, ENTITY *e, void *P)
-{
-	return P;
-}
-void CALLBACK(ADAPTER *a, ENTITY *e)
-{
-	if (e && e->callback)
-		e->callback(e);
-}
diff --git a/drivers/isdn/hardware/eicon/io.h b/drivers/isdn/hardware/eicon/io.h
deleted file mode 100644
index 01deced..0000000
--- a/drivers/isdn/hardware/eicon/io.h
+++ /dev/null
@@ -1,308 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_COMMON_IO_H_INC__ /* { */
-#define __DIVA_XDI_COMMON_IO_H_INC__
-/*
-  maximum = 16 adapters
-*/
-#define DI_MAX_LINKS    MAX_ADAPTER
-#define ISDN_MAX_NUM_LEN 60
-/* --------------------------------------------------------------------------
-   structure for quadro card management (obsolete for
-   systems that do provide per card load event)
-   -------------------------------------------------------------------------- */
-typedef struct {
-	dword         Num;
-	DEVICE_NAME   DeviceName[4];
-	PISDN_ADAPTER QuadroAdapter[4];
-} ADAPTER_LIST_ENTRY, *PADAPTER_LIST_ENTRY;
-/* --------------------------------------------------------------------------
-   Special OS memory support structures
-   -------------------------------------------------------------------------- */
-#define MAX_MAPPED_ENTRIES 8
-typedef struct {
-	void *Address;
-	dword    Length;
-} ADAPTER_MEMORY;
-/* --------------------------------------------------------------------------
-   Configuration of XDI clients carried by XDI
-   -------------------------------------------------------------------------- */
-#define DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON      0x01
-#define DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON 0x02
-typedef struct _diva_xdi_capi_cfg {
-	byte cfg_1;
-} diva_xdi_capi_cfg_t;
-/* --------------------------------------------------------------------------
-   Main data structure kept per adapter
-   -------------------------------------------------------------------------- */
-struct _ISDN_ADAPTER {
-	void (*DIRequest)(PISDN_ADAPTER, ENTITY *);
-	int State; /* from NT4 1.srv, a good idea, but  a poor achievement */
-	int Initialized;
-	int RegisteredWithDidd;
-	int Unavailable;  /* callback function possible? */
-	int ResourcesClaimed;
-	int PnpBiosConfigUsed;
-	dword Logging;
-	dword features;
-	char ProtocolIdString[80];
-	/*
-	  remember mapped memory areas
-	*/
-	ADAPTER_MEMORY MappedMemory[MAX_MAPPED_ENTRIES];
-	CARD_PROPERTIES Properties;
-	dword cardType;
-	dword protocol_id;       /* configured protocol identifier */
-	char protocol_name[8];  /* readable name of protocol */
-	dword BusType;
-	dword BusNumber;
-	dword slotNumber;
-	dword slotId;
-	dword ControllerNumber;  /* for QUADRO cards only */
-	PISDN_ADAPTER MultiMaster;       /* for 4-BRI card only - use MultiMaster or QuadroList */
-	PADAPTER_LIST_ENTRY QuadroList;        /* for QUADRO card  only */
-	PDEVICE_OBJECT DeviceObject;
-	dword DeviceId;
-	diva_os_adapter_irq_info_t irq_info;
-	dword volatile IrqCount;
-	int trapped;
-	dword DspCodeBaseAddr;
-	dword MaxDspCodeSize;
-	dword downloadAddr;
-	dword DspCodeBaseAddrTable[4]; /* add. for MultiMaster */
-	dword MaxDspCodeSizeTable[4]; /* add. for MultiMaster */
-	dword downloadAddrTable[4]; /* add. for MultiMaster */
-	dword MemoryBase;
-	dword MemorySize;
-	byte __iomem *Address;
-	byte __iomem *Config;
-	byte __iomem *Control;
-	byte __iomem *reset;
-	byte __iomem *port;
-	byte __iomem *ram;
-	byte __iomem *cfg;
-	byte __iomem *prom;
-	byte __iomem *ctlReg;
-	struct pc_maint  *pcm;
-	diva_os_dependent_devica_name_t os_name;
-	byte Name[32];
-	dword serialNo;
-	dword ANum;
-	dword ArchiveType; /* ARCHIVE_TYPE_NONE ..._SINGLE ..._USGEN ..._MULTI */
-	char *ProtocolSuffix; /* internal protocolfile table */
-	char Archive[32];
-	char Protocol[32];
-	char AddDownload[32]; /* Dsp- or other additional download files */
-	char Oad1[ISDN_MAX_NUM_LEN];
-	char Osa1[ISDN_MAX_NUM_LEN];
-	char Oad2[ISDN_MAX_NUM_LEN];
-	char Osa2[ISDN_MAX_NUM_LEN];
-	char Spid1[ISDN_MAX_NUM_LEN];
-	char Spid2[ISDN_MAX_NUM_LEN];
-	byte nosig;
-	byte BriLayer2LinkCount; /* amount of TEI's that adapter will support in P2MP mode */
-	dword Channels;
-	dword tei;
-	dword nt2;
-	dword TerminalCount;
-	dword WatchDog;
-	dword Permanent;
-	dword BChMask; /* B channel mask for unchannelized modes */
-	dword StableL2;
-	dword DidLen;
-	dword NoOrderCheck;
-	dword ForceLaw; /* VoiceCoding - default:0, a-law: 1, my-law: 2 */
-	dword SigFlags;
-	dword LowChannel;
-	dword NoHscx30;
-	dword ProtVersion;
-	dword crc4;
-	dword L1TristateOrQsig; /* enable Layer 1 Tristate (bit 2)Or Qsig params (bit 0,1)*/
-	dword InitialDspInfo;
-	dword ModemGuardTone;
-	dword ModemMinSpeed;
-	dword ModemMaxSpeed;
-	dword ModemOptions;
-	dword ModemOptions2;
-	dword ModemNegotiationMode;
-	dword ModemModulationsMask;
-	dword ModemTransmitLevel;
-	dword FaxOptions;
-	dword FaxMaxSpeed;
-	dword Part68LevelLimiter;
-	dword UsEktsNumCallApp;
-	byte UsEktsFeatAddConf;
-	byte UsEktsFeatRemoveConf;
-	byte UsEktsFeatCallTransfer;
-	byte UsEktsFeatMsgWaiting;
-	byte QsigDialect;
-	byte ForceVoiceMailAlert;
-	byte DisableAutoSpid;
-	byte ModemCarrierWaitTimeSec;
-	byte ModemCarrierLossWaitTimeTenthSec;
-	byte PiafsLinkTurnaroundInFrames;
-	byte DiscAfterProgress;
-	byte AniDniLimiter[3];
-	byte TxAttenuation;  /* PRI/E1 only: attenuate TX signal */
-	word QsigFeatures;
-	dword GenerateRingtone;
-	dword SupplementaryServicesFeatures;
-	dword R2Dialect;
-	dword R2CasOptions;
-	dword FaxV34Options;
-	dword DisabledDspMask;
-	dword AdapterTestMask;
-	dword DspImageLength;
-	word AlertToIn20mSecTicks;
-	word ModemEyeSetup;
-	byte R2CtryLength;
-	byte CCBSRelTimer;
-	byte *PcCfgBufferFile;/* flexible parameter via file */
-	byte *PcCfgBuffer; /* flexible parameter via multistring */
-	diva_os_dump_file_t dump_file; /* dump memory to file at lowest irq level */
-	diva_os_board_trace_t board_trace; /* traces from the board */
-	diva_os_spin_lock_t isr_spin_lock;
-	diva_os_spin_lock_t data_spin_lock;
-	diva_os_soft_isr_t req_soft_isr;
-	diva_os_soft_isr_t isr_soft_isr;
-	diva_os_atomic_t  in_dpc;
-	PBUFFER RBuffer;        /* Copy of receive lookahead buffer */
-	word e_max;
-	word e_count;
-	E_INFO *e_tbl;
-	word assign;         /* list of pending ASSIGNs  */
-	word head;           /* head of request queue    */
-	word tail;           /* tail of request queue    */
-	ADAPTER a;             /* not a separate structure */
-	void (*out)(ADAPTER *a);
-	byte (*dpc)(ADAPTER *a);
-	byte (*tst_irq)(ADAPTER *a);
-	void (*clr_irq)(ADAPTER *a);
-	int (*load)(PISDN_ADAPTER);
-	int (*mapmem)(PISDN_ADAPTER);
-	int (*chkIrq)(PISDN_ADAPTER);
-	void (*disIrq)(PISDN_ADAPTER);
-	void (*start)(PISDN_ADAPTER);
-	void (*stop)(PISDN_ADAPTER);
-	void (*rstFnc)(PISDN_ADAPTER);
-	void (*trapFnc)(PISDN_ADAPTER);
-	dword (*DetectDsps)(PISDN_ADAPTER);
-	void (*os_trap_nfy_Fnc)(PISDN_ADAPTER, dword);
-	diva_os_isr_callback_t diva_isr_handler;
-	dword sdram_bar;  /* must be 32 bit */
-	dword fpga_features;
-	volatile int pcm_pending;
-	volatile void *pcm_data;
-	diva_xdi_capi_cfg_t capi_cfg;
-	dword tasks;
-	void *dma_map;
-	int (*DivaAdapterTestProc)(PISDN_ADAPTER);
-	void *AdapterTestMemoryStart;
-	dword AdapterTestMemoryLength;
-	const byte *cfg_lib_memory_init;
-	dword cfg_lib_memory_init_length;
-};
-/* ---------------------------------------------------------------------
-   Entity table
-   --------------------------------------------------------------------- */
-struct e_info_s {
-	ENTITY *e;
-	byte          next;                   /* chaining index           */
-	word          assign_ref;             /* assign reference         */
-};
-/* ---------------------------------------------------------------------
-   S-cards shared ram structure for loading
-   --------------------------------------------------------------------- */
-struct s_load {
-	byte ctrl;
-	byte card;
-	byte msize;
-	byte fill0;
-	word ebit;
-	word elocl;
-	word eloch;
-	byte reserved[20];
-	word signature;
-	byte fill[224];
-	byte b[256];
-};
-#define PR_RAM  ((struct pr_ram *)0)
-#define RAM ((struct dual *)0)
-/* ---------------------------------------------------------------------
-   platform specific conversions
-   --------------------------------------------------------------------- */
-extern void *PTR_P(ADAPTER *a, ENTITY *e, void *P);
-extern void *PTR_X(ADAPTER *a, ENTITY *e);
-extern void *PTR_R(ADAPTER *a, ENTITY *e);
-extern void CALLBACK(ADAPTER *a, ENTITY *e);
-extern void set_ram(void **adr_ptr);
-/* ---------------------------------------------------------------------
-   ram access functions for io mapped cards
-   --------------------------------------------------------------------- */
-byte io_in(ADAPTER *a, void *adr);
-word io_inw(ADAPTER *a, void *adr);
-void io_in_buffer(ADAPTER *a, void *adr, void *P, word length);
-void io_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
-void io_out(ADAPTER *a, void *adr, byte data);
-void io_outw(ADAPTER *a, void *adr, word data);
-void io_out_buffer(ADAPTER *a, void *adr, void *P, word length);
-void io_inc(ADAPTER *a, void *adr);
-void bri_in_buffer(PISDN_ADAPTER IoAdapter, dword Pos,
-		   void *Buf, dword Len);
-int bri_out_buffer(PISDN_ADAPTER IoAdapter, dword Pos,
-		   void *Buf, dword Len, int Verify);
-/* ---------------------------------------------------------------------
-   ram access functions for memory mapped cards
-   --------------------------------------------------------------------- */
-byte mem_in(ADAPTER *a, void *adr);
-word mem_inw(ADAPTER *a, void *adr);
-void mem_in_buffer(ADAPTER *a, void *adr, void *P, word length);
-void mem_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
-void mem_out(ADAPTER *a, void *adr, byte data);
-void mem_outw(ADAPTER *a, void *adr, word data);
-void mem_out_buffer(ADAPTER *a, void *adr, void *P, word length);
-void mem_inc(ADAPTER *a, void *adr);
-void mem_in_dw(ADAPTER *a, void *addr, dword *data, int dwords);
-void mem_out_dw(ADAPTER *a, void *addr, const dword *data, int dwords);
-/* ---------------------------------------------------------------------
-   functions exported by io.c
-   --------------------------------------------------------------------- */
-extern IDI_CALL Requests[MAX_ADAPTER];
-extern void     DIDpcRoutine(struct _diva_os_soft_isr *psoft_isr,
-			     void *context);
-extern void     request(PISDN_ADAPTER, ENTITY *);
-/* ---------------------------------------------------------------------
-   trapFn helpers, used to recover debug trace from dead card
-   --------------------------------------------------------------------- */
-typedef struct {
-	word *buf;
-	word  cnt;
-	word  out;
-} Xdesc;
-extern void dump_trap_frame(PISDN_ADAPTER IoAdapter, byte __iomem *exception);
-extern void dump_xlog_buffer(PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc);
-/* --------------------------------------------------------------------- */
-#endif  /* } __DIVA_XDI_COMMON_IO_H_INC__ */
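
io.h above declares two parallel accessor families with identical signatures, io_*() for port-I/O mapped cards and mem_*() for memory-mapped cards, so the common request/indication engine never has to know how a given board is mapped. The driver's real dispatch goes through function pointers in the ADAPTER structure defined elsewhere; the table below is only a simplified stand-in that illustrates the idea.

/* Sketch: picking one accessor family at setup time and dispatching
 * through a table, so call sites stay mapping-agnostic.  The stub
 * accessors and the ram_ops table are illustrative assumptions. */
#include <stdio.h>

typedef unsigned char byte;

struct ram_ops {
	byte (*in)(void *adr);
	void (*out)(void *adr, byte data);
};

/* Stub "io-mapped" accessors (a real driver would use port I/O here). */
static byte stub_io_in(void *adr)		{ return *(byte *)adr; }
static void stub_io_out(void *adr, byte data)	{ *(byte *)adr = data; }

/* Stub "memory-mapped" accessors (a real driver would use readb/writeb). */
static byte stub_mem_in(void *adr)		{ return *(byte *)adr; }
static void stub_mem_out(void *adr, byte data)	{ *(byte *)adr = data; }

static const struct ram_ops io_ops  = { stub_io_in,  stub_io_out  };
static const struct ram_ops mem_ops = { stub_mem_in, stub_mem_out };

int main(void)
{
	byte fake_ram[4] = { 0 };
	const struct ram_ops *ops = &mem_ops;	/* chosen once, at adapter setup */

	ops->out(&fake_ram[0], 0x42);
	printf("memory-mapped family reads back 0x%02x\n", ops->in(&fake_ram[0]));

	ops = &io_ops;				/* same call sites, other family */
	printf("io-mapped family reads back 0x%02x\n", ops->in(&fake_ram[0]));
	return 0;
}
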
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
deleted file mode 100644
index 045bda5..0000000
--- a/drivers/isdn/hardware/eicon/istream.c
+++ /dev/null
@@ -1,226 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#if defined(DIVA_ISTREAM) /* { */
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "di.h"
-#if !defined USE_EXTENDED_DEBUGS
-#include "dimaint.h"
-#else
-#define dprintf
-#endif
-#include "dfifo.h"
-int diva_istream_write(void *context,
-		       int   Id,
-		       void *data,
-		       int length,
-		       int final,
-		       byte usr1,
-		       byte usr2);
-int diva_istream_read(void *context,
-		      int Id,
-		      void *data,
-		      int max_length,
-		      int *final,
-		      byte *usr1,
-		      byte *usr2);
-/* -------------------------------------------------------------------
-   Does provide iStream interface to the client
-   ------------------------------------------------------------------- */
-void diva_xdi_provide_istream_info(ADAPTER *a,
-				   diva_xdi_stream_interface_t *pi) {
-	pi->provided_service = 0;
-}
-/* ------------------------------------------------------------------
-   Does write the data from caller's buffer to the card's
-   stream interface.
-   If synchronous service was requested, then function
-   does return amount of data written to stream.
-   'final' does indicate that piece of data to be written is
-   final part of the frame (necessary only for structured data transfer)
-   return  0 if a zero length packet was written
-   return -1 if stream is full
-   ------------------------------------------------------------------ */
-int diva_istream_write(void *context,
-		       int Id,
-		       void *data,
-		       int length,
-		       int final,
-		       byte usr1,
-		       byte usr2) {
-	ADAPTER *a = (ADAPTER *)context;
-	int written = 0, to_write = -1;
-	char tmp[4];
-	byte *data_ptr = (byte *)data;
-	for (;;) {
-		a->ram_in_dw(a,
-#ifdef PLATFORM_GT_32BIT
-			      ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
-#else
-			      (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
-#endif
-			      (dword *)&tmp[0],
-			      1);
-		if (tmp[0] & DIVA_DFIFO_READY) { /* No free blocks more */
-			if (to_write < 0)
-				return (-1); /* was not able to write       */
-			break;     /* only part of message was written */
-		}
-		to_write = min(length, DIVA_DFIFO_DATA_SZ);
-		if (to_write) {
-			a->ram_out_buffer(a,
-#ifdef PLATFORM_GT_32BIT
-					   ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id] + 4),
-#else
-					   (void *)(a->tx_stream[Id] + a->tx_pos[Id] + 4),
-#endif
-					   data_ptr,
-					   (word)to_write);
-			length  -= to_write;
-			written  += to_write;
-			data_ptr += to_write;
-		}
-		tmp[1] = (char)to_write;
-		tmp[0] = (tmp[0] & DIVA_DFIFO_WRAP) |
-			DIVA_DFIFO_READY |
-			((!length && final) ? DIVA_DFIFO_LAST : 0);
-		if (tmp[0] & DIVA_DFIFO_LAST) {
-			tmp[2] = usr1;
-			tmp[3] = usr2;
-		}
-		a->ram_out_dw(a,
-#ifdef PLATFORM_GT_32BIT
-			       ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
-#else
-			       (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
-#endif
-			       (dword *)&tmp[0],
-			       1);
-		if (tmp[0] & DIVA_DFIFO_WRAP) {
-			a->tx_pos[Id]  = 0;
-		} else {
-			a->tx_pos[Id] += DIVA_DFIFO_STEP;
-		}
-		if (!length) {
-			break;
-		}
-	}
-	return (written);
-}
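
The comment block above documents the diva_istream_write() contract: the return value is the number of bytes the stream accepted, 0 for a zero length packet and -1 when the FIFO is full. Below is a caller-side sketch of pushing a frame through an interface with that convention; the mock writer and the retry policy are assumptions for illustration, not the driver's code.

/* Illustrative caller-side sketch of the write contract above
 * (> 0 bytes accepted, 0 for an empty packet, -1 when the FIFO is full).
 * The mock stands in for the real card interface. */
#include <stdio.h>
#include <string.h>

typedef unsigned char byte;

typedef int (*istream_write_t)(void *context, int Id, void *data,
			       int length, int final, byte usr1, byte usr2);

/* Mock: accepts at most 4 bytes per call and reports "full" now and then. */
static int mock_write(void *context, int Id, void *data,
		      int length, int final, byte usr1, byte usr2)
{
	static int calls;
	(void)context; (void)Id; (void)data; (void)final; (void)usr1; (void)usr2;
	if (++calls % 3 == 0)
		return -1;               /* FIFO full, try again later      */
	return length < 4 ? length : 4;  /* partial write                   */
}

/* Push one frame, resubmitting the unsent tail until it is all accepted. */
static int send_frame(istream_write_t wr, void *ctx, int id,
		      const byte *buf, int len)
{
	int off = 0;

	while (off < len) {
		int n = wr(ctx, id, (void *)(buf + off), len - off,
			   1 /* final */, 0, 0);
		if (n < 0)
			continue;        /* full: a real caller would wait here */
		off += n;
	}
	return off;
}

int main(void)
{
	const byte frame[] = "example frame payload";

	printf("sent %d bytes\n",
	       send_frame(mock_write, NULL, 0, frame,
			  (int)strlen((const char *)frame)));
	return 0;
}
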
-/* -------------------------------------------------------------------
-   In case of SYNCHRONOUS service:
-   Does write data from stream in caller's buffer.
-   Does return amount of data written to buffer
-   Final flag is set on return if last part of structured frame
-   was received
-   return 0  if zero packet was received
-   return -1 if stream is empty
-   return -2 if the read buffer does not provide sufficient space
-   to accommodate the entire segment
-   max_length should be at least 68 bytes
-   ------------------------------------------------------------------- */
-int diva_istream_read(void *context,
-		      int Id,
-		      void *data,
-		      int max_length,
-		      int *final,
-		      byte *usr1,
-		      byte *usr2) {
-	ADAPTER *a = (ADAPTER *)context;
-	int read = 0, to_read = -1;
-	char tmp[4];
-	byte *data_ptr = (byte *)data;
-	*final = 0;
-	for (;;) {
-		a->ram_in_dw(a,
-#ifdef PLATFORM_GT_32BIT
-			      ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
-#else
-			      (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
-#endif
-			      (dword *)&tmp[0],
-			      1);
-		if (tmp[1] > max_length) {
-			if (to_read < 0)
-				return (-2); /* was not able to read */
-			break;
-		}
-		if (!(tmp[0] & DIVA_DFIFO_READY)) {
-			if (to_read < 0)
-				return (-1); /* was not able to read */
-			break;
-		}
-		to_read = min(max_length, (int)tmp[1]);
-		if (to_read) {
-			a->ram_in_buffer(a,
-#ifdef PLATFORM_GT_32BIT
-					 ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id] + 4),
-#else
-					 (void *)(a->rx_stream[Id] + a->rx_pos[Id] + 4),
-#endif
-					 data_ptr,
-					 (word)to_read);
-			max_length -= to_read;
-			read     += to_read;
-			data_ptr  += to_read;
-		}
-		if (tmp[0] & DIVA_DFIFO_LAST) {
-			*final = 1;
-		}
-		tmp[0] &= DIVA_DFIFO_WRAP;
-		a->ram_out_dw(a,
-#ifdef PLATFORM_GT_32BIT
-			      ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
-#else
-			      (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
-#endif
-			      (dword *)&tmp[0],
-			      1);
-		if (tmp[0] & DIVA_DFIFO_WRAP) {
-			a->rx_pos[Id]  = 0;
-		} else {
-			a->rx_pos[Id] += DIVA_DFIFO_STEP;
-		}
-		if (*final) {
-			if (usr1)
-				*usr1 = tmp[2];
-			if (usr2)
-				*usr2 = tmp[3];
-			break;
-		}
-	}
-	return (read);
-}
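
diva_istream_read() mirrors the writer: it copies the next segment into the caller's buffer, sets *final on the last part of a structured frame, and returns -1 for an empty stream or -2 when the buffer cannot hold the segment (hence the "at least 68 bytes" note above). Below is a receive-loop sketch against a mock source with the same return convention; the mock and its two-segment frame are assumptions.

/* Illustrative receive-side sketch of the read contract above:
 * >= 0 bytes copied, -1 stream empty, -2 buffer too small, *final set on
 * the last segment of a frame.  The mock source is an assumption. */
#include <stdio.h>
#include <string.h>

typedef unsigned char byte;

static int mock_read(void *context, int Id, void *data, int max_length,
		     int *final, byte *usr1, byte *usr2)
{
	static const char *segs[] = { "first part ", "last part" };
	static int i;
	int n;

	(void)context; (void)Id; (void)usr1; (void)usr2;
	if (i >= 2)
		return -1;                        /* stream empty              */
	n = (int)strlen(segs[i]);
	if (n > max_length)
		return -2;                        /* buffer cannot hold segment */
	memcpy(data, segs[i], (size_t)n);
	*final = (++i == 2);                      /* last segment of this frame */
	return n;
}

int main(void)
{
	byte buf[68];                             /* per the note above: >= 68  */
	int final = 0, n;

	while ((n = mock_read(NULL, 0, buf, (int)sizeof(buf),
			      &final, NULL, NULL)) >= 0)
		printf("got %d bytes%s\n", n, final ? " (end of frame)" : "");
	/* n is -1 here: nothing more queued (-2 would mean: enlarge the buffer). */
	return 0;
}
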
-/* ---------------------------------------------------------------------
-   Does check if one of streams had caused interrupt and does
-   wake up corresponding application
-   --------------------------------------------------------------------- */
-void pr_stream(ADAPTER *a) {
-}
-#endif /* } */
diff --git a/drivers/isdn/hardware/eicon/kst_ifc.h b/drivers/isdn/hardware/eicon/kst_ifc.h
deleted file mode 100644
index 894fdfd..0000000
--- a/drivers/isdn/hardware/eicon/kst_ifc.h
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_EICON_TRACE_API__
-#define __DIVA_EICON_TRACE_API__
-
-#define DIVA_TRACE_LINE_TYPE_LEN 64
-#define DIVA_TRACE_IE_LEN        64
-#define DIVA_TRACE_FAX_PRMS_LEN  128
-
-typedef struct _diva_trace_ie {
-	byte length;
-	byte data[DIVA_TRACE_IE_LEN];
-} diva_trace_ie_t;
-
-/*
-  Structure used to represent "State\\BX\\Modem" directory
-  to user.
-*/
-typedef struct _diva_trace_modem_state {
-	dword	ChannelNumber;
-
-	dword	Event;
-
-	dword	Norm;
-
-	dword Options; /* Options received from Application */
-
-	dword	TxSpeed;
-	dword	RxSpeed;
-
-	dword RoundtripMsec;
-
-	dword SymbolRate;
-
-	int		RxLeveldBm;
-	int		EchoLeveldBm;
-
-	dword	SNRdb;
-	dword MAE;
-
-	dword LocalRetrains;
-	dword RemoteRetrains;
-	dword LocalResyncs;
-	dword RemoteResyncs;
-
-	dword DiscReason;
-
-} diva_trace_modem_state_t;
-
-/*
-  Representation of "State\\BX\\FAX" directory
-*/
-typedef struct _diva_trace_fax_state {
-	dword	ChannelNumber;
-	dword Event;
-	dword Page_Counter;
-	dword Features;
-	char Station_ID[DIVA_TRACE_FAX_PRMS_LEN];
-	char Subaddress[DIVA_TRACE_FAX_PRMS_LEN];
-	char Password[DIVA_TRACE_FAX_PRMS_LEN];
-	dword Speed;
-	dword Resolution;
-	dword Paper_Width;
-	dword Paper_Length;
-	dword Scanline_Time;
-	dword Disc_Reason;
-	dword	dummy;
-} diva_trace_fax_state_t;
-
-/*
-  Structure used to represent Interface State in the abstract
-  and interface/D-channel protocol independent form.
-*/
-typedef struct _diva_trace_interface_state {
-	char Layer1[DIVA_TRACE_LINE_TYPE_LEN];
-	char Layer2[DIVA_TRACE_LINE_TYPE_LEN];
-} diva_trace_interface_state_t;
-
-typedef struct _diva_incoming_call_statistics {
-	dword Calls;
-	dword Connected;
-	dword User_Busy;
-	dword Call_Rejected;
-	dword Wrong_Number;
-	dword Incompatible_Dst;
-	dword Out_of_Order;
-	dword Ignored;
-} diva_incoming_call_statistics_t;
-
-typedef struct _diva_outgoing_call_statistics {
-	dword Calls;
-	dword Connected;
-	dword User_Busy;
-	dword No_Answer;
-	dword Wrong_Number;
-	dword Call_Rejected;
-	dword Other_Failures;
-} diva_outgoing_call_statistics_t;
-
-typedef struct _diva_modem_call_statistics {
-	dword Disc_Normal;
-	dword Disc_Unspecified;
-	dword Disc_Busy_Tone;
-	dword Disc_Congestion;
-	dword Disc_Carr_Wait;
-	dword Disc_Trn_Timeout;
-	dword Disc_Incompat;
-	dword Disc_Frame_Rej;
-	dword Disc_V42bis;
-} diva_modem_call_statistics_t;
-
-typedef struct _diva_fax_call_statistics {
-	dword Disc_Normal;
-	dword Disc_Not_Ident;
-	dword Disc_No_Response;
-	dword Disc_Retries;
-	dword Disc_Unexp_Msg;
-	dword Disc_No_Polling;
-	dword Disc_Training;
-	dword Disc_Unexpected;
-	dword Disc_Application;
-	dword Disc_Incompat;
-	dword Disc_No_Command;
-	dword Disc_Long_Msg;
-	dword Disc_Supervisor;
-	dword Disc_SUB_SEP_PWD;
-	dword Disc_Invalid_Msg;
-	dword Disc_Page_Coding;
-	dword Disc_App_Timeout;
-	dword Disc_Unspecified;
-} diva_fax_call_statistics_t;
-
-typedef struct _diva_prot_statistics {
-	dword X_Frames;
-	dword X_Bytes;
-	dword X_Errors;
-	dword R_Frames;
-	dword R_Bytes;
-	dword R_Errors;
-} diva_prot_statistics_t;
-
-typedef struct _diva_ifc_statistics {
-	diva_incoming_call_statistics_t	inc;
-	diva_outgoing_call_statistics_t outg;
-	diva_modem_call_statistics_t mdm;
-	diva_fax_call_statistics_t fax;
-	diva_prot_statistics_t b1;
-	diva_prot_statistics_t b2;
-	diva_prot_statistics_t d1;
-	diva_prot_statistics_t d2;
-} diva_ifc_statistics_t;
-
-/*
-  Structure used to represent "State\\BX" directory
-  to user.
-*/
-typedef struct _diva_trace_line_state {
-	dword	ChannelNumber;
-
-	char Line[DIVA_TRACE_LINE_TYPE_LEN];
-
-	char Framing[DIVA_TRACE_LINE_TYPE_LEN];
-
-	char Layer2[DIVA_TRACE_LINE_TYPE_LEN];
-	char Layer3[DIVA_TRACE_LINE_TYPE_LEN];
-
-	char RemoteAddress[DIVA_TRACE_LINE_TYPE_LEN];
-	char RemoteSubAddress[DIVA_TRACE_LINE_TYPE_LEN];
-
-	char LocalAddress[DIVA_TRACE_LINE_TYPE_LEN];
-	char LocalSubAddress[DIVA_TRACE_LINE_TYPE_LEN];
-
-	diva_trace_ie_t call_BC;
-	diva_trace_ie_t call_HLC;
-	diva_trace_ie_t call_LLC;
-
-	dword Charges;
-
-	dword CallReference;
-
-	dword LastDisconnecCause;
-
-	char UserID[DIVA_TRACE_LINE_TYPE_LEN];
-
-	diva_trace_modem_state_t modem;
-	diva_trace_fax_state_t fax;
-
-	diva_trace_interface_state_t *pInterface;
-
-	diva_ifc_statistics_t *pInterfaceStat;
-
-} diva_trace_line_state_t;
-
-#define DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE             ('l')
-#define DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE            ('m')
-#define DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE              ('f')
-#define DIVA_SUPER_TRACE_INTERFACE_CHANGE               ('i')
-#define DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE             ('s')
-#define DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE         ('M')
-#define DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE         ('F')
-
-struct _diva_strace_library_interface;
-typedef void (*diva_trace_channel_state_change_proc_t)(void *user_context,
-						       struct _diva_strace_library_interface *hLib,
-						       int Adapter,
-						       diva_trace_line_state_t *channel, int notify_subject);
-typedef void (*diva_trace_channel_trace_proc_t)(void *user_context,
-						struct _diva_strace_library_interface *hLib,
-						int Adapter, void *xlog_buffer, int length);
-typedef void (*diva_trace_error_proc_t)(void *user_context,
-					struct _diva_strace_library_interface *hLib,
-					int Adapter,
-					int error, const char *file, int line);
-
-/*
-  This structure creates interface from user to library
-*/
-typedef struct _diva_trace_library_user_interface {
-	void *user_context;
-	diva_trace_channel_state_change_proc_t notify_proc;
-	diva_trace_channel_trace_proc_t trace_proc;
-	diva_trace_error_proc_t error_notify_proc;
-} diva_trace_library_user_interface_t;
-
-/*
-  Interface from Library to User
-*/
-typedef int (*DivaSTraceLibraryStart_proc_t)(void *hLib);
-typedef int (*DivaSTraceLibraryFinit_proc_t)(void *hLib);
-typedef int (*DivaSTraceMessageInput_proc_t)(void *hLib);
-typedef void* (*DivaSTraceGetHandle_proc_t)(void *hLib);
-
-/*
-  Turn Audio Tap trace on/off
-  Channel should be in the range 1 ... Number of Channels
-*/
-typedef int (*DivaSTraceSetAudioTap_proc_t)(void *hLib, int Channel, int on);
-
-/*
-  Turn B-channel trace on/off
-  Channel should be in the range 1 ... Number of Channels
-*/
-typedef int (*DivaSTraceSetBChannel_proc_t)(void *hLib, int Channel, int on);
-
-/*
-  Turn	D-channel (Layer1/Layer2/Layer3) trace on/off
-  Layer1 - All D-channel frames received/sent over the interface
-  inclusive Layer 2 headers, Layer 2 frames and TEI management frames
-  Layer2 - Events from LAPD protocol instance with SAPI of signalling protocol
-  Layer3 - All D-channel frames addressed to the TEI assigned to the card and
-  SAPI of signalling protocol, and signalling protocol events.
-*/
-typedef int (*DivaSTraceSetDChannel_proc_t)(void *hLib, int on);
-
-/*
-  Get overall card statistics
-*/
-typedef int (*DivaSTraceGetOutgoingCallStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetIncomingCallStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetModemStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetFaxStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetBLayer1Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetBLayer2Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetDLayer1Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetDLayer2Statistics_proc_t)(void *hLib);
-
-/*
-  Call control
-*/
-typedef int (*DivaSTraceClearCall_proc_t)(void *hLib, int Channel);
-
-typedef struct _diva_strace_library_interface {
-	void *hLib;
-	DivaSTraceLibraryStart_proc_t DivaSTraceLibraryStart;
-	DivaSTraceLibraryStart_proc_t DivaSTraceLibraryStop;
-	DivaSTraceLibraryFinit_proc_t DivaSTraceLibraryFinit;
-	DivaSTraceMessageInput_proc_t DivaSTraceMessageInput;
-	DivaSTraceGetHandle_proc_t DivaSTraceGetHandle;
-	DivaSTraceSetAudioTap_proc_t DivaSTraceSetAudioTap;
-	DivaSTraceSetBChannel_proc_t DivaSTraceSetBChannel;
-	DivaSTraceSetDChannel_proc_t DivaSTraceSetDChannel;
-	DivaSTraceSetDChannel_proc_t DivaSTraceSetInfo;
-	DivaSTraceGetOutgoingCallStatistics_proc_t \
-	DivaSTraceGetOutgoingCallStatistics;
-	DivaSTraceGetIncomingCallStatistics_proc_t \
-	DivaSTraceGetIncomingCallStatistics;
-	DivaSTraceGetModemStatistics_proc_t \
-	DivaSTraceGetModemStatistics;
-	DivaSTraceGetFaxStatistics_proc_t \
-	DivaSTraceGetFaxStatistics;
-	DivaSTraceGetBLayer1Statistics_proc_t \
-	DivaSTraceGetBLayer1Statistics;
-	DivaSTraceGetBLayer2Statistics_proc_t \
-	DivaSTraceGetBLayer2Statistics;
-	DivaSTraceGetDLayer1Statistics_proc_t \
-	DivaSTraceGetDLayer1Statistics;
-	DivaSTraceGetDLayer2Statistics_proc_t \
-	DivaSTraceGetDLayer2Statistics;
-	DivaSTraceClearCall_proc_t DivaSTraceClearCall;
-} diva_strace_library_interface_t;
-
-/*
-  Create and return Library interface
-*/
-diva_strace_library_interface_t *DivaSTraceLibraryCreateInstance(int Adapter,
-								 const diva_trace_library_user_interface_t *user_proc,
-								 byte *pmem);
-dword DivaSTraceGetMemotyRequirement(int channels);
-
-#define DIVA_MAX_ADAPTERS  64
-#define DIVA_MAX_LINES     32
-
-#endif
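
kst_ifc.h above splits the trace API into two tables: the client hands in a diva_trace_library_user_interface_t (a user_context plus notify/trace/error callbacks) and gets back a diva_strace_library_interface_t of operations bound to an opaque hLib. The self-contained miniature below illustrates that pairing with simplified stand-in types; the real structures, DivaSTraceLibraryCreateInstance() and the 'l' (line change) notify subject are the ones declared in the header above.

/* Illustrative stand-in for the user-interface / library-interface pairing
 * declared in kst_ifc.h: callbacks plus user_context in, an operations
 * table bound to an opaque handle out.  Not the real driver types. */
#include <stdio.h>

/* "User interface": what the client gives the library. */
typedef void (*notify_cb)(void *user_context, int adapter, int channel, int subject);

struct user_if {
	void     *user_context;
	notify_cb notify_proc;
};

/* "Library interface": what the library gives back. */
struct lib_if {
	void *hLib;
	int (*start)(void *hLib);
};

/* Toy library instance tying the two halves together. */
struct lib_ctx {
	struct user_if user;
	struct lib_if  instance;
};

static int lib_start(void *hLib)
{
	struct lib_ctx *ctx = hLib;

	/* Pretend channel 1 changed state and notify the client ('l'). */
	ctx->user.notify_proc(ctx->user.user_context, 1, 1, 'l');
	return 0;
}

static struct lib_if *lib_create(const struct user_if *user, struct lib_ctx *mem)
{
	mem->user           = *user;
	mem->instance.hLib  = mem;
	mem->instance.start = lib_start;
	return &mem->instance;
}

static void my_notify(void *user_context, int adapter, int channel, int subject)
{
	printf("%s: adapter %d channel %d subject '%c'\n",
	       (const char *)user_context, adapter, channel, subject);
}

int main(void)
{
	static char client_name[] = "client";
	struct lib_ctx mem;
	struct user_if user = { .user_context = client_name, .notify_proc = my_notify };
	struct lib_if *lib  = lib_create(&user, &mem);

	return lib->start(lib->hLib);
}
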
diff --git a/drivers/isdn/hardware/eicon/maintidi.c b/drivers/isdn/hardware/eicon/maintidi.c
deleted file mode 100644
index 2ee789f..0000000
--- a/drivers/isdn/hardware/eicon/maintidi.c
+++ /dev/null
@@ -1,2194 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "kst_ifc.h"
-#include "di_defs.h"
-#include "maintidi.h"
-#include "pc.h"
-#include "man_defs.h"
-
-
-extern void diva_mnt_internal_dprintf(dword drv_id, dword type, char *p, ...);
-
-#define MODEM_PARSE_ENTRIES  16 /* amount of variables of interest */
-#define FAX_PARSE_ENTRIES    12 /* amount of variables of interest */
-#define LINE_PARSE_ENTRIES   15 /* amount of variables of interest */
-#define STAT_PARSE_ENTRIES   70 /* amount of variables of interest */
-
-/*
-  LOCAL FUNCTIONS
-*/
-static int DivaSTraceLibraryStart(void *hLib);
-static int DivaSTraceLibraryStop(void *hLib);
-static int SuperTraceLibraryFinit(void *hLib);
-static void *SuperTraceGetHandle(void *hLib);
-static int SuperTraceMessageInput(void *hLib);
-static int SuperTraceSetAudioTap(void *hLib, int Channel, int on);
-static int SuperTraceSetBChannel(void *hLib, int Channel, int on);
-static int SuperTraceSetDChannel(void *hLib, int on);
-static int SuperTraceSetInfo(void *hLib, int on);
-static int SuperTraceClearCall(void *hLib, int Channel);
-static int SuperTraceGetOutgoingCallStatistics(void *hLib);
-static int SuperTraceGetIncomingCallStatistics(void *hLib);
-static int SuperTraceGetModemStatistics(void *hLib);
-static int SuperTraceGetFaxStatistics(void *hLib);
-static int SuperTraceGetBLayer1Statistics(void *hLib);
-static int SuperTraceGetBLayer2Statistics(void *hLib);
-static int SuperTraceGetDLayer1Statistics(void *hLib);
-static int SuperTraceGetDLayer2Statistics(void *hLib);
-
-/*
-  LOCAL FUNCTIONS
-*/
-static int ScheduleNextTraceRequest(diva_strace_context_t *pLib);
-static int process_idi_event(diva_strace_context_t *pLib,
-			     diva_man_var_header_t *pVar);
-static int process_idi_info(diva_strace_context_t *pLib,
-			    diva_man_var_header_t *pVar);
-static int diva_modem_event(diva_strace_context_t *pLib, int Channel);
-static int diva_fax_event(diva_strace_context_t *pLib, int Channel);
-static int diva_line_event(diva_strace_context_t *pLib, int Channel);
-static int diva_modem_info(diva_strace_context_t *pLib,
-			   int Channel,
-			   diva_man_var_header_t *pVar);
-static int diva_fax_info(diva_strace_context_t *pLib,
-			 int Channel,
-			 diva_man_var_header_t *pVar);
-static int diva_line_info(diva_strace_context_t *pLib,
-			  int Channel,
-			  diva_man_var_header_t *pVar);
-static int diva_ifc_statistics(diva_strace_context_t *pLib,
-			       diva_man_var_header_t *pVar);
-static diva_man_var_header_t *get_next_var(diva_man_var_header_t *pVar);
-static diva_man_var_header_t *find_var(diva_man_var_header_t *pVar,
-				       const char *name);
-static int diva_strace_read_int(diva_man_var_header_t *pVar, int *var);
-static int diva_strace_read_uint(diva_man_var_header_t *pVar, dword *var);
-static int diva_strace_read_asz(diva_man_var_header_t *pVar, char *var);
-static int diva_strace_read_asc(diva_man_var_header_t *pVar, char *var);
-static int diva_strace_read_ie(diva_man_var_header_t *pVar,
-			       diva_trace_ie_t *var);
-static void diva_create_parse_table(diva_strace_context_t *pLib);
-static void diva_trace_error(diva_strace_context_t *pLib,
-			     int error, const char *file, int line);
-static void diva_trace_notify_user(diva_strace_context_t *pLib,
-				   int Channel,
-				   int notify_subject);
-static int diva_trace_read_variable(diva_man_var_header_t *pVar,
-				    void *variable);
-
-/*
-  Initialize the library and return context
-  of the created trace object that will represent
-  the IDI adapter.
-  Return 0 on error.
-*/
-diva_strace_library_interface_t *DivaSTraceLibraryCreateInstance(int Adapter,
-								 const diva_trace_library_user_interface_t *user_proc,
-								 byte *pmem) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)pmem;
-	int i;
-
-	if (!pLib) {
-		return NULL;
-	}
-
-	pmem += sizeof(*pLib);
-	memset(pLib, 0x00, sizeof(*pLib));
-
-	pLib->Adapter  = Adapter;
-
-	/*
-	  Set up Library Interface
-	*/
-	pLib->instance.hLib                                = pLib;
-	pLib->instance.DivaSTraceLibraryStart              = DivaSTraceLibraryStart;
-	pLib->instance.DivaSTraceLibraryStop               = DivaSTraceLibraryStop;
-	pLib->instance.DivaSTraceLibraryFinit              = SuperTraceLibraryFinit;
-	pLib->instance.DivaSTraceMessageInput              = SuperTraceMessageInput;
-	pLib->instance.DivaSTraceGetHandle                 = SuperTraceGetHandle;
-	pLib->instance.DivaSTraceSetAudioTap               = SuperTraceSetAudioTap;
-	pLib->instance.DivaSTraceSetBChannel               = SuperTraceSetBChannel;
-	pLib->instance.DivaSTraceSetDChannel               = SuperTraceSetDChannel;
-	pLib->instance.DivaSTraceSetInfo                   = SuperTraceSetInfo;
-	pLib->instance.DivaSTraceGetOutgoingCallStatistics = \
-		SuperTraceGetOutgoingCallStatistics;
-	pLib->instance.DivaSTraceGetIncomingCallStatistics = \
-		SuperTraceGetIncomingCallStatistics;
-	pLib->instance.DivaSTraceGetModemStatistics        = \
-		SuperTraceGetModemStatistics;
-	pLib->instance.DivaSTraceGetFaxStatistics          = \
-		SuperTraceGetFaxStatistics;
-	pLib->instance.DivaSTraceGetBLayer1Statistics      = \
-		SuperTraceGetBLayer1Statistics;
-	pLib->instance.DivaSTraceGetBLayer2Statistics      = \
-		SuperTraceGetBLayer2Statistics;
-	pLib->instance.DivaSTraceGetDLayer1Statistics      = \
-		SuperTraceGetDLayer1Statistics;
-	pLib->instance.DivaSTraceGetDLayer2Statistics      = \
-		SuperTraceGetDLayer2Statistics;
-	pLib->instance.DivaSTraceClearCall                 = SuperTraceClearCall;
-
-
-	if (user_proc) {
-		pLib->user_proc_table.user_context      = user_proc->user_context;
-		pLib->user_proc_table.notify_proc       = user_proc->notify_proc;
-		pLib->user_proc_table.trace_proc        = user_proc->trace_proc;
-		pLib->user_proc_table.error_notify_proc = user_proc->error_notify_proc;
-	}
-
-	if (!(pLib->hAdapter = SuperTraceOpenAdapter(Adapter))) {
-		diva_mnt_internal_dprintf(0, DLI_ERR, "Can not open XDI adapter");
-		return NULL;
-	}
-	pLib->Channels = SuperTraceGetNumberOfChannels(pLib->hAdapter);
-
-	/*
-	  Calculate the number of parse table entries necessary to translate
-	  information from all events of interest
-	*/
-	pLib->parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES + \
-			       STAT_PARSE_ENTRIES + \
-			       LINE_PARSE_ENTRIES + 1) * pLib->Channels;
-	pLib->parse_table = (diva_strace_path2action_t *)pmem;
-
-	for (i = 0; i < 30; i++) {
-		pLib->lines[i].pInterface     = &pLib->Interface;
-		pLib->lines[i].pInterfaceStat = &pLib->InterfaceStat;
-	}
-
-	pLib->e.R = &pLib->RData;
-
-	pLib->req_busy = 1;
-	pLib->rc_ok    = ASSIGN_OK;
-
-	diva_create_parse_table(pLib);
-
-	return ((diva_strace_library_interface_t *)pLib);
-}
-
-static int DivaSTraceLibraryStart(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	return (SuperTraceASSIGN(pLib->hAdapter, pLib->buffer));
-}
-
-/*
-  Return (-1) on error
-  Return (0) if was initiated or pending
-  Return (1) if removal is complete
-*/
-static int DivaSTraceLibraryStop(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if (!pLib->e.Id) { /* Was never started/assigned */
-		return (1);
-	}
-
-	switch (pLib->removal_state) {
-	case 0:
-		pLib->removal_state = 1;
-		ScheduleNextTraceRequest(pLib);
-		break;
-
-	case 3:
-		return (1);
-	}
-
-	return (0);
-}
-
-static int SuperTraceLibraryFinit(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	if (pLib) {
-		if (pLib->hAdapter) {
-			SuperTraceCloseAdapter(pLib->hAdapter);
-		}
-		return (0);
-	}
-	return (-1);
-}
-
-static void *SuperTraceGetHandle(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	return (&pLib->e);
-}
-
-/*
-  After the library handle object goes into the signaled state,
-  this function should be called; it will pick up incoming
-  IDI messages (return codes and indications).
-*/
-static int SuperTraceMessageInput(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	int ret = 0;
-	byte Rc, Ind;
-
-	if (pLib->e.complete == 255) {
-		/*
-		  Process return code
-		*/
-		pLib->req_busy = 0;
-		Rc             = pLib->e.Rc;
-		pLib->e.Rc     = 0;
-
-		if (pLib->removal_state == 2) {
-			pLib->removal_state = 3;
-			return (0);
-		}
-
-		if (Rc != pLib->rc_ok) {
-			int ignore = 0;
-			/*
-			  Auto-detect amount of events/channels and features
-			*/
-			if (pLib->general_b_ch_event == 1) {
-				pLib->general_b_ch_event = 2;
-				ignore = 1;
-			} else if (pLib->general_fax_event == 1) {
-				pLib->general_fax_event = 2;
-				ignore = 1;
-			} else if (pLib->general_mdm_event == 1) {
-				pLib->general_mdm_event = 2;
-				ignore = 1;
-			} else if ((pLib->ChannelsTraceActive < pLib->Channels) && pLib->ChannelsTraceActive) {
-				pLib->ChannelsTraceActive = pLib->Channels;
-				ignore = 1;
-			} else if (pLib->ModemTraceActive < pLib->Channels) {
-				pLib->ModemTraceActive = pLib->Channels;
-				ignore = 1;
-			} else if (pLib->FaxTraceActive < pLib->Channels) {
-				pLib->FaxTraceActive = pLib->Channels;
-				ignore = 1;
-			} else if (pLib->audio_trace_init == 2) {
-				ignore = 1;
-				pLib->audio_trace_init = 1;
-			} else if (pLib->eye_pattern_pending) {
-				pLib->eye_pattern_pending =  0;
-				ignore = 1;
-			} else if (pLib->audio_tap_pending) {
-				pLib->audio_tap_pending = 0;
-				ignore = 1;
-			}
-
-			if (!ignore) {
-				return (-1); /* request failed */
-			}
-		} else {
-			if (pLib->general_b_ch_event == 1) {
-				pLib->ChannelsTraceActive = pLib->Channels;
-				pLib->general_b_ch_event = 2;
-			} else if (pLib->general_fax_event == 1) {
-				pLib->general_fax_event = 2;
-				pLib->FaxTraceActive = pLib->Channels;
-			} else if (pLib->general_mdm_event == 1) {
-				pLib->general_mdm_event = 2;
-				pLib->ModemTraceActive = pLib->Channels;
-			}
-		}
-		if (pLib->audio_trace_init == 2) {
-			pLib->audio_trace_init = 1;
-		}
-		pLib->rc_ok = 0xff; /* default OK after assign was done */
-		if ((ret = ScheduleNextTraceRequest(pLib))) {
-			return (-1);
-		}
-	} else {
-		/*
-		  Process indication
-		  Always 'RNR' indication if return code is pending
-		*/
-		Ind         = pLib->e.Ind;
-		pLib->e.Ind = 0;
-		if (pLib->removal_state) {
-			pLib->e.RNum	= 0;
-			pLib->e.RNR	= 2;
-		} else if (pLib->req_busy) {
-			pLib->e.RNum	= 0;
-			pLib->e.RNR	= 1;
-		} else {
-			if (pLib->e.complete != 0x02) {
-				/*
-				  Look-ahead call, set up buffers
-				*/
-				pLib->e.RNum       = 1;
-				pLib->e.R->P       = (byte *)&pLib->buffer[0];
-				pLib->e.R->PLength = (word)(sizeof(pLib->buffer) - 1);
-
-			} else {
-				/*
-				  Indication reception complete, process it now
-				*/
-				byte *p = (byte *)&pLib->buffer[0];
-				pLib->buffer[pLib->e.R->PLength] = 0; /* terminate I.E. with zero */
-
-				switch (Ind) {
-				case MAN_COMBI_IND: {
-					int total_length    = pLib->e.R->PLength;
-					word  this_ind_length;
-
-					while (total_length > 3 && *p) {
-						Ind = *p++;
-						this_ind_length = (word)p[0] | ((word)p[1] << 8);
-						p += 2;
-
-						switch (Ind) {
-						case MAN_INFO_IND:
-							if (process_idi_info(pLib, (diva_man_var_header_t *)p)) {
-								return (-1);
-							}
-							break;
-						case MAN_EVENT_IND:
-							if (process_idi_event(pLib, (diva_man_var_header_t *)p)) {
-								return (-1);
-							}
-							break;
-						case MAN_TRACE_IND:
-							if (pLib->trace_on == 1) {
-								/*
-								  Ignore first trace event that is result of
-								  EVENT_ON operation
-								*/
-								pLib->trace_on++;
-							} else {
-								/*
-								  Delivery XLOG buffer to application
-								*/
-								if (pLib->user_proc_table.trace_proc) {
-									(*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context,
-													      &pLib->instance, pLib->Adapter,
-													      p, this_ind_length);
-								}
-							}
-							break;
-						default:
-							diva_mnt_internal_dprintf(0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind);
-						}
-						p += (this_ind_length + 1);
-						total_length -= (4 + this_ind_length);
-					}
-				} break;
-				case MAN_INFO_IND:
-					if (process_idi_info(pLib, (diva_man_var_header_t *)p)) {
-						return (-1);
-					}
-					break;
-				case MAN_EVENT_IND:
-					if (process_idi_event(pLib, (diva_man_var_header_t *)p)) {
-						return (-1);
-					}
-					break;
-				case MAN_TRACE_IND:
-					if (pLib->trace_on == 1) {
-						/*
-						  Ignore first trace event that is result of
-						  EVENT_ON operation
-						*/
-						pLib->trace_on++;
-					} else {
-						/*
-						  Delivery XLOG buffer to application
-						*/
-						if (pLib->user_proc_table.trace_proc) {
-							(*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context,
-											      &pLib->instance, pLib->Adapter,
-											      p, pLib->e.R->PLength);
-						}
-					}
-					break;
-				default:
-					diva_mnt_internal_dprintf(0, DLI_ERR, "Unknown IDI Ind: %02x", Ind);
-				}
-			}
-		}
-	}
-
-	if ((ret = ScheduleNextTraceRequest(pLib))) {
-		return (-1);
-	}
-
-	return (ret);
-}
-
-/*
-  Internal state machine responsible for scheduling of requests
-*/
-static int ScheduleNextTraceRequest(diva_strace_context_t *pLib) {
-	char name[64];
-	int ret = 0;
-	int i;
-
-	if (pLib->req_busy) {
-		return (0);
-	}
-
-	if (pLib->removal_state == 1) {
-		if (SuperTraceREMOVE(pLib->hAdapter)) {
-			pLib->removal_state = 3;
-		} else {
-			pLib->req_busy = 1;
-			pLib->removal_state = 2;
-		}
-		return (0);
-	}
-
-	if (pLib->removal_state) {
-		return (0);
-	}
-
-	if (!pLib->general_b_ch_event) {
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\B Event", pLib->buffer))) {
-			return (-1);
-		}
-		pLib->general_b_ch_event = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->general_fax_event) {
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\FAX Event", pLib->buffer))) {
-			return (-1);
-		}
-		pLib->general_fax_event = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->general_mdm_event) {
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\Modem Event", pLib->buffer))) {
-			return (-1);
-		}
-		pLib->general_mdm_event = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->ChannelsTraceActive < pLib->Channels) {
-		pLib->ChannelsTraceActive++;
-		sprintf(name, "State\\B%d\\Line", pLib->ChannelsTraceActive);
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->ChannelsTraceActive--;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->ModemTraceActive < pLib->Channels) {
-		pLib->ModemTraceActive++;
-		sprintf(name, "State\\B%d\\Modem\\Event", pLib->ModemTraceActive);
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->ModemTraceActive--;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->FaxTraceActive < pLib->Channels) {
-		pLib->FaxTraceActive++;
-		sprintf(name, "State\\B%d\\FAX\\Event", pLib->FaxTraceActive);
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->FaxTraceActive--;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->trace_mask_init) {
-		word tmp = 0x0000;
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\Event Enable",
-				       &tmp,
-				       0x87, /* MI_BITFLD */
-					sizeof(tmp))) {
-			return (-1);
-		}
-		pLib->trace_mask_init = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->audio_trace_init) {
-		dword tmp = 0x00000000;
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\AudioCh# Enable",
-				       &tmp,
-				       0x87, /* MI_BITFLD */
-					sizeof(tmp))) {
-			return (-1);
-		}
-		pLib->audio_trace_init = 2;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->bchannel_init) {
-		dword tmp = 0x00000000;
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\B-Ch# Enable",
-				       &tmp,
-				       0x87, /* MI_BITFLD */
-					sizeof(tmp))) {
-			return (-1);
-		}
-		pLib->bchannel_init = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->trace_length_init) {
-		word tmp = 30;
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\Max Log Length",
-				       &tmp,
-				       0x82, /* MI_UINT */
-					sizeof(tmp))) {
-			return (-1);
-		}
-		pLib->trace_length_init = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->trace_on) {
-		if (SuperTraceTraceOnRequest(pLib->hAdapter,
-					     "Trace\\Log Buffer",
-					     pLib->buffer)) {
-			return (-1);
-		}
-		pLib->trace_on = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->trace_event_mask != pLib->current_trace_event_mask) {
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\Event Enable",
-				       &pLib->trace_event_mask,
-				       0x87, /* MI_BITFLD */
-					sizeof(pLib->trace_event_mask))) {
-			return (-1);
-		}
-		pLib->current_trace_event_mask = pLib->trace_event_mask;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if ((pLib->audio_tap_pending >= 0) && (pLib->audio_tap_mask != pLib->current_audio_tap_mask)) {
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\AudioCh# Enable",
-				       &pLib->audio_tap_mask,
-				       0x87, /* MI_BITFLD */
-					sizeof(pLib->audio_tap_mask))) {
-			return (-1);
-		}
-		pLib->current_audio_tap_mask = pLib->audio_tap_mask;
-		pLib->audio_tap_pending = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if ((pLib->eye_pattern_pending >= 0) && (pLib->audio_tap_mask != pLib->current_eye_pattern_mask)) {
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\EyeCh# Enable",
-				       &pLib->audio_tap_mask,
-				       0x87, /* MI_BITFLD */
-					sizeof(pLib->audio_tap_mask))) {
-			return (-1);
-		}
-		pLib->current_eye_pattern_mask = pLib->audio_tap_mask;
-		pLib->eye_pattern_pending = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->bchannel_trace_mask != pLib->current_bchannel_trace_mask) {
-		if (SuperTraceWriteVar(pLib->hAdapter,
-				       pLib->buffer,
-				       "Trace\\B-Ch# Enable",
-				       &pLib->bchannel_trace_mask,
-				       0x87, /* MI_BITFLD */
-					sizeof(pLib->bchannel_trace_mask))) {
-			return (-1);
-		}
-		pLib->current_bchannel_trace_mask = pLib->bchannel_trace_mask;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->trace_events_down) {
-		if (SuperTraceTraceOnRequest(pLib->hAdapter,
-					     "Events Down",
-					     pLib->buffer)) {
-			return (-1);
-		}
-		pLib->trace_events_down = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->l1_trace) {
-		if (SuperTraceTraceOnRequest(pLib->hAdapter,
-					     "State\\Layer1",
-					     pLib->buffer)) {
-			return (-1);
-		}
-		pLib->l1_trace = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->l2_trace) {
-		if (SuperTraceTraceOnRequest(pLib->hAdapter,
-					     "State\\Layer2 No1",
-					     pLib->buffer)) {
-			return (-1);
-		}
-		pLib->l2_trace = 1;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	for (i = 0; i < 30; i++) {
-		if (pLib->pending_line_status & (1L << i)) {
-			sprintf(name, "State\\B%d", i + 1);
-			if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
-				return (-1);
-			}
-			pLib->pending_line_status &= ~(1L << i);
-			pLib->req_busy = 1;
-			return (0);
-		}
-		if (pLib->pending_modem_status & (1L << i)) {
-			sprintf(name, "State\\B%d\\Modem", i + 1);
-			if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
-				return (-1);
-			}
-			pLib->pending_modem_status &= ~(1L << i);
-			pLib->req_busy = 1;
-			return (0);
-		}
-		if (pLib->pending_fax_status & (1L << i)) {
-			sprintf(name, "State\\B%d\\FAX", i + 1);
-			if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
-				return (-1);
-			}
-			pLib->pending_fax_status &= ~(1L << i);
-			pLib->req_busy = 1;
-			return (0);
-		}
-		if (pLib->clear_call_command & (1L << i)) {
-			sprintf(name, "State\\B%d\\Clear Call", i + 1);
-			if (SuperTraceExecuteRequest(pLib->hAdapter, name, pLib->buffer)) {
-				return (-1);
-			}
-			pLib->clear_call_command &= ~(1L << i);
-			pLib->req_busy = 1;
-			return (0);
-		}
-	}
-
-	if (pLib->outgoing_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\Outgoing Calls",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->outgoing_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->incoming_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\Incoming Calls",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->incoming_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->modem_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\Modem",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->modem_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->fax_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\FAX",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->fax_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->b1_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\B-Layer1",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->b1_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->b2_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\B-Layer2",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->b2_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->d1_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\D-Layer1",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->d1_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (pLib->d2_ifc_stats) {
-		if (SuperTraceReadRequest(pLib->hAdapter,
-					  "Statistics\\D-Layer2",
-					  pLib->buffer)) {
-			return (-1);
-		}
-		pLib->d2_ifc_stats = 0;
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	if (!pLib->IncomingCallsCallsActive) {
-		pLib->IncomingCallsCallsActive = 1;
-		sprintf(name, "%s", "Statistics\\Incoming Calls\\Calls");
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->IncomingCallsCallsActive = 0;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-	if (!pLib->IncomingCallsConnectedActive) {
-		pLib->IncomingCallsConnectedActive = 1;
-		sprintf(name, "%s", "Statistics\\Incoming Calls\\Connected");
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->IncomingCallsConnectedActive = 0;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-	if (!pLib->OutgoingCallsCallsActive) {
-		pLib->OutgoingCallsCallsActive = 1;
-		sprintf(name, "%s", "Statistics\\Outgoing Calls\\Calls");
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->OutgoingCallsCallsActive = 0;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-	if (!pLib->OutgoingCallsConnectedActive) {
-		pLib->OutgoingCallsConnectedActive = 1;
-		sprintf(name, "%s", "Statistics\\Outgoing Calls\\Connected");
-		if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
-			pLib->OutgoingCallsConnectedActive = 0;
-			return (-1);
-		}
-		pLib->req_busy = 1;
-		return (0);
-	}
-
-	return (0);
-}
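
ScheduleNextTraceRequest() above is a flat state machine: while req_busy is set it does nothing; otherwise it finds the first outstanding piece of work (event subscriptions, trace-variable writes, per-channel status reads, statistics reads), issues exactly one request and marks req_busy again, and SuperTraceMessageInput() clears the flag and re-enters the scheduler when the return code arrives. Below is a small self-contained sketch of that one-outstanding-request discipline; the three work items and the issue() stub are illustrative assumptions.

/* Illustrative sketch of the one-outstanding-request scheduling used by
 * ScheduleNextTraceRequest(): at most one request is in flight (req_busy),
 * and each completion re-enters the scheduler to pick the next work item. */
#include <stdio.h>

struct sched {
	int req_busy;		/* a request is outstanding          */
	int b_event_on;		/* pending work items, 0 = still to do */
	int fax_event_on;
	int modem_event_on;
};

static void issue(const char *what, struct sched *s)
{
	printf("request: %s\n", what);	/* stand-in for SuperTrace...Request() */
	s->req_busy = 1;
}

static int schedule_next(struct sched *s)
{
	if (s->req_busy)
		return 0;		/* wait for the pending return code */
	if (!s->b_event_on)     { s->b_event_on = 1;     issue("State\\B Event", s);     return 0; }
	if (!s->fax_event_on)   { s->fax_event_on = 1;   issue("State\\FAX Event", s);   return 0; }
	if (!s->modem_event_on) { s->modem_event_on = 1; issue("State\\Modem Event", s); return 0; }
	return 0;			/* nothing left to do */
}

/* Called when the return code for the outstanding request arrives. */
static void on_return_code(struct sched *s)
{
	s->req_busy = 0;
	schedule_next(s);
}

int main(void)
{
	struct sched s = { 0 };

	schedule_next(&s);		/* kick off the first request       */
	on_return_code(&s);		/* each completion drives the next  */
	on_return_code(&s);
	on_return_code(&s);		/* queue drained, nothing is issued */
	return 0;
}
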
-
-static int process_idi_event(diva_strace_context_t *pLib,
-			     diva_man_var_header_t *pVar) {
-	const char *path = (char *)&pVar->path_length + 1;
-	char name[64];
-	int i;
-
-	if (!strncmp("State\\B Event", path, pVar->path_length)) {
-		dword ch_id;
-		if (!diva_trace_read_variable(pVar, &ch_id)) {
-			if (!pLib->line_init_event && !pLib->pending_line_status) {
-				for (i = 1; i <= pLib->Channels; i++) {
-					diva_line_event(pLib, i);
-				}
-				return (0);
-			} else if (ch_id && ch_id <= pLib->Channels) {
-				return (diva_line_event(pLib, (int)ch_id));
-			}
-			return (0);
-		}
-		return (-1);
-	}
-
-	if (!strncmp("State\\FAX Event", path, pVar->path_length)) {
-		dword ch_id;
-		if (!diva_trace_read_variable(pVar, &ch_id)) {
-			if (!pLib->pending_fax_status && !pLib->fax_init_event) {
-				for (i = 1; i <= pLib->Channels; i++) {
-					diva_fax_event(pLib, i);
-				}
-				return (0);
-			} else if (ch_id && ch_id <= pLib->Channels) {
-				return (diva_fax_event(pLib, (int)ch_id));
-			}
-			return (0);
-		}
-		return (-1);
-	}
-
-	if (!strncmp("State\\Modem Event", path, pVar->path_length)) {
-		dword ch_id;
-		if (!diva_trace_read_variable(pVar, &ch_id)) {
-			if (!pLib->pending_modem_status && !pLib->modem_init_event) {
-				for (i = 1; i <= pLib->Channels; i++) {
-					diva_modem_event(pLib, i);
-				}
-				return (0);
-			} else if (ch_id && ch_id <= pLib->Channels) {
-				return (diva_modem_event(pLib, (int)ch_id));
-			}
-			return (0);
-		}
-		return (-1);
-	}
-
-	/*
-	  First look for Line Event
-	*/
-	for (i = 1; i <= pLib->Channels; i++) {
-		sprintf(name, "State\\B%d\\Line", i);
-		if (find_var(pVar, name)) {
-			return (diva_line_event(pLib, i));
-		}
-	}
-
-	/*
-	  Look for Modem Progress Event
-	*/
-	for (i = 1; i <= pLib->Channels; i++) {
-		sprintf(name, "State\\B%d\\Modem\\Event", i);
-		if (find_var(pVar, name)) {
-			return (diva_modem_event(pLib, i));
-		}
-	}
-
-	/*
-	  Look for Fax Event
-	*/
-	for (i = 1; i <= pLib->Channels; i++) {
-		sprintf(name, "State\\B%d\\FAX\\Event", i);
-		if (find_var(pVar, name)) {
-			return (diva_fax_event(pLib, i));
-		}
-	}
-
-	/*
-	  Notification about loss of events
-	*/
-	if (!strncmp("Events Down", path, pVar->path_length)) {
-		if (pLib->trace_events_down == 1) {
-			pLib->trace_events_down = 2;
-		} else {
-			diva_trace_error(pLib, 1, "Events Down", 0);
-		}
-		return (0);
-	}
-
-	if (!strncmp("State\\Layer1", path, pVar->path_length)) {
-		diva_strace_read_asz(pVar, &pLib->lines[0].pInterface->Layer1[0]);
-		if (pLib->l1_trace == 1) {
-			pLib->l1_trace = 2;
-		} else {
-			diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE);
-		}
-		return (0);
-	}
-	if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) {
-		char *tmp = &pLib->lines[0].pInterface->Layer2[0];
-		dword l2_state;
-		if (diva_strace_read_uint(pVar, &l2_state))
-			return -1;
-
-		switch (l2_state) {
-		case 0:
-			strcpy(tmp, "Idle");
-			break;
-		case 1:
-			strcpy(tmp, "Layer2 UP");
-			break;
-		case 2:
-			strcpy(tmp, "Layer2 Disconnecting");
-			break;
-		case 3:
-			strcpy(tmp, "Layer2 Connecting");
-			break;
-		case 4:
-			strcpy(tmp, "SPID Initializing");
-			break;
-		case 5:
-			strcpy(tmp, "SPID Initialised");
-			break;
-		case 6:
-			strcpy(tmp, "Layer2 Connecting");
-			break;
-
-		case  7:
-			strcpy(tmp, "Auto SPID Stopped");
-			break;
-
-		case  8:
-			strcpy(tmp, "Auto SPID Idle");
-			break;
-
-		case  9:
-			strcpy(tmp, "Auto SPID Requested");
-			break;
-
-		case  10:
-			strcpy(tmp, "Auto SPID Delivery");
-			break;
-
-		case 11:
-			strcpy(tmp, "Auto SPID Complete");
-			break;
-
-		default:
-			sprintf(tmp, "U:%d", (int)l2_state);
-		}
-		if (pLib->l2_trace == 1) {
-			pLib->l2_trace = 2;
-		} else {
-			diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE);
-		}
-		return (0);
-	}
-
-	if (!strncmp("Statistics\\Incoming Calls\\Calls", path, pVar->path_length) ||
-	    !strncmp("Statistics\\Incoming Calls\\Connected", path, pVar->path_length)) {
-		return (SuperTraceGetIncomingCallStatistics(pLib));
-	}
-
-	if (!strncmp("Statistics\\Outgoing Calls\\Calls", path, pVar->path_length) ||
-	    !strncmp("Statistics\\Outgoing Calls\\Connected", path, pVar->path_length)) {
-		return (SuperTraceGetOutgoingCallStatistics(pLib));
-	}
-
-	return (-1);
-}
-
-static int diva_line_event(diva_strace_context_t *pLib, int Channel) {
-	pLib->pending_line_status |= (1L << (Channel - 1));
-	return (0);
-}
-
-static int diva_modem_event(diva_strace_context_t *pLib, int Channel) {
-	pLib->pending_modem_status |= (1L << (Channel - 1));
-	return (0);
-}
-
-static int diva_fax_event(diva_strace_context_t *pLib, int Channel) {
-	pLib->pending_fax_status |= (1L << (Channel - 1));
-	return (0);
-}
-
-/*
-  Process INFO indications that arrive from the card
-  Uses the path of the first I.E. to detect the source of the
-  indication
-*/
-static int process_idi_info(diva_strace_context_t *pLib,
-			    diva_man_var_header_t *pVar) {
-	const char *path = (char *)&pVar->path_length + 1;
-	char name[64];
-	int i, len;
-
-	/*
-	  First look for Modem Status Info
-	*/
-	for (i = pLib->Channels; i > 0; i--) {
-		len = sprintf(name, "State\\B%d\\Modem", i);
-		if (!strncmp(name, path, len)) {
-			return (diva_modem_info(pLib, i, pVar));
-		}
-	}
-
-	/*
-	  Look for Fax Status Info
-	*/
-	for (i = pLib->Channels; i > 0; i--) {
-		len = sprintf(name, "State\\B%d\\FAX", i);
-		if (!strncmp(name, path, len)) {
-			return (diva_fax_info(pLib, i, pVar));
-		}
-	}
-
-	/*
-	  Look for Line Status Info
-	*/
-	for (i = pLib->Channels; i > 0; i--) {
-		len = sprintf(name, "State\\B%d", i);
-		if (!strncmp(name, path, len)) {
-			return (diva_line_info(pLib, i, pVar));
-		}
-	}
-
-	if (!diva_ifc_statistics(pLib, pVar)) {
-		return (0);
-	}
-
-	return (-1);
-}
-
-/*
-  MODEM INSTANCE STATE UPDATE
-
-  Update Modem Status Information and issue notification to user,
-  that will inform about a change in the state of the modem instance that is
-  associated with this channel
-*/
-static int diva_modem_info(diva_strace_context_t *pLib,
-			   int Channel,
-			   diva_man_var_header_t *pVar) {
-	diva_man_var_header_t *cur;
-	int i, nr = Channel - 1;
-
-	for (i  = pLib->modem_parse_entry_first[nr];
-	     i <= pLib->modem_parse_entry_last[nr]; i++) {
-		if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
-			if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
-				diva_trace_error(pLib, -3, __FILE__, __LINE__);
-				return (-1);
-			}
-		} else {
-			diva_trace_error(pLib, -2, __FILE__, __LINE__);
-			return (-1);
-		}
-	}
-
-	/*
-	  We do not use the first event to notify the user - this is the event
-	  that is generated as a result of the EVENT ON operation and is used
-	  only to initialize internal variables of the application
-	*/
-	if (pLib->modem_init_event & (1L << nr)) {
-		diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE);
-	} else {
-		pLib->modem_init_event |= (1L << nr);
-	}
-
-	return (0);
-}
-
-static int diva_fax_info(diva_strace_context_t *pLib,
-			 int Channel,
-			 diva_man_var_header_t *pVar) {
-	diva_man_var_header_t *cur;
-	int i, nr = Channel - 1;
-
-	for (i  = pLib->fax_parse_entry_first[nr];
-	     i <= pLib->fax_parse_entry_last[nr]; i++) {
-		if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
-			if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
-				diva_trace_error(pLib, -3, __FILE__, __LINE__);
-				return (-1);
-			}
-		} else {
-			diva_trace_error(pLib, -2, __FILE__, __LINE__);
-			return (-1);
-		}
-	}
-
-	/*
-	  We do not use the first event to notify the user - this is the event
-	  that is generated as a result of the EVENT ON operation and is used
-	  only to initialize internal variables of the application
-	*/
-	if (pLib->fax_init_event & (1L << nr)) {
-		diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE);
-	} else {
-		pLib->fax_init_event |= (1L << nr);
-	}
-
-	return (0);
-}
-
-/*
-  LINE STATE UPDATE
-  Update the line status information and notify the user about a change in
-  the line state.
-*/
-static int diva_line_info(diva_strace_context_t *pLib,
-			  int Channel,
-			  diva_man_var_header_t *pVar) {
-	diva_man_var_header_t *cur;
-	int i, nr = Channel - 1;
-
-	for (i = pLib->line_parse_entry_first[nr];
-	     i <= pLib->line_parse_entry_last[nr]; i++) {
-		if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
-			if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
-				diva_trace_error(pLib, -3, __FILE__, __LINE__);
-				return (-1);
-			}
-		} else {
-			diva_trace_error(pLib, -2 , __FILE__, __LINE__);
-			return (-1);
-		}
-	}
-
-	/*
-	  We do not use the first event to notify the user - this is the event
-	  that is generated as a result of the EVENT ON operation and is used
-	  only to initialize internal variables of the application
-
-	  The exception is if the line is "online". In this case we have to
-	  notify the user about this condition.
-	*/
-	if (pLib->line_init_event & (1L << nr)) {
-		diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
-	} else {
-		pLib->line_init_event |= (1L << nr);
-		if (strcmp(&pLib->lines[nr].Line[0], "Idle")) {
-			diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
-		}
-	}
-
-	return (0);
-}
-
-/*
-  Move position to the next variable in the chain
-*/
-static diva_man_var_header_t *get_next_var(diva_man_var_header_t *pVar) {
-	byte *msg = (byte *)pVar;
-	byte *start;
-	int msg_length;
-
-	if (*msg != ESC) return NULL;
-
-	start = msg + 2;
-	msg_length = *(msg + 1);
-	msg = (start + msg_length);
-
-	if (*msg != ESC) return NULL;
-
-	return ((diva_man_var_header_t *)msg);
-}
-
-/*
-  Move position to the variable with the given name
-*/
-static diva_man_var_header_t *find_var(diva_man_var_header_t *pVar,
-				       const char *name) {
-	const char *path;
-
-	do {
-		path = (char *)&pVar->path_length + 1;
-
-		if (!strncmp(name, path, pVar->path_length)) {
-			break;
-		}
-	} while ((pVar = get_next_var(pVar)));
-
-	return (pVar);
-}
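get_next_var() and find_var() treat the management buffer as a chain of ESC/length framed elements: an element begins with the ESC marker (0x7f according to the coding table in man_defs.h), followed by a length byte and that many payload bytes, and the walk stops as soon as the marker is missing. A stand-alone sketch of the same framing with an invented two-element buffer:

#include <stdio.h>

#define ESC 0x7f	/* marker value taken from the man_defs.h coding table */

/* step to the next element like get_next_var(): skip marker, length, payload */
static const unsigned char *next_elem(const unsigned char *p)
{
	if (p[0] != ESC)
		return NULL;
	return p + 2 + p[1];
}

int main(void)
{
	/* two dummy elements (3-byte and 1-byte payload), then a terminator */
	const unsigned char buf[] = { ESC, 3, 'a', 'b', 'c', ESC, 1, 'x', 0 };
	const unsigned char *p = buf;

	while (p && p[0] == ESC) {
		printf("element with %d payload bytes\n", p[1]);
		p = next_elem(p);
	}
	return 0;
}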
-
-static void diva_create_line_parse_table(diva_strace_context_t *pLib,
-					 int Channel) {
-	diva_trace_line_state_t *pLine = &pLib->lines[Channel];
-	int nr = Channel + 1;
-
-	if ((pLib->cur_parse_entry + LINE_PARSE_ENTRIES) >= pLib->parse_entries) {
-		diva_trace_error(pLib, -1, __FILE__, __LINE__);
-		return;
-	}
-
-	pLine->ChannelNumber = nr;
-
-	pLib->line_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Framing", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Framing[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Line", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Line[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Layer2", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer2[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Layer3", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer3[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Remote Address", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLine->RemoteAddress[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Remote SubAddr", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLine->RemoteSubAddress[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Local Address", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLine->LocalAddress[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Local SubAddr", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLine->LocalSubAddress[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\BC", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_BC;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\HLC", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_HLC;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\LLC", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_LLC;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Charges", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Charges;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Call Reference", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->CallReference;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Last Disc Cause", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLine->LastDisconnecCause;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\User ID", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->UserID[0];
-
-	pLib->line_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
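Each table entry simply pairs a full management path with a pointer to the member that should receive the value; find_var() later locates the element by path and diva_trace_read_variable() writes through the stored pointer. A reduced sketch of that mapping (the struct and variable names here are illustrative, not the driver's types):

#include <stdio.h>
#include <string.h>

struct path2var {
	char path[64];		/* full path to the management variable */
	void *variable;		/* destination that receives the value  */
};

int main(void)
{
	char line_state[32];
	struct path2var table[1];

	snprintf(table[0].path, sizeof(table[0].path), "State\\B%d\\Line", 1);
	table[0].variable = line_state;

	/* later, when an indication for that path has been found: */
	strcpy((char *)table[0].variable, "Connected");
	printf("%s -> %s\n", table[0].path, line_state);
	return 0;
}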
-
-static void diva_create_fax_parse_table(diva_strace_context_t *pLib,
-					int Channel) {
-	diva_trace_fax_state_t *pFax = &pLib->lines[Channel].fax;
-	int nr = Channel + 1;
-
-	if ((pLib->cur_parse_entry + FAX_PARSE_ENTRIES) >= pLib->parse_entries) {
-		diva_trace_error(pLib, -1, __FILE__, __LINE__);
-		return;
-	}
-	pFax->ChannelNumber = nr;
-
-	pLib->fax_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Event", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Event;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Page Counter", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Page_Counter;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Features", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Features;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Station ID", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Station_ID[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Subaddress", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Subaddress[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Password", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Password[0];
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Speed", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Speed;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Resolution", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Resolution;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Paper Width", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Width;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Paper Length", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Length;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Scanline Time", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Scanline_Time;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\FAX\\Disc Reason", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Disc_Reason;
-
-	pLib->fax_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
-
-static void diva_create_modem_parse_table(diva_strace_context_t *pLib,
-					  int Channel) {
-	diva_trace_modem_state_t *pModem = &pLib->lines[Channel].modem;
-	int nr = Channel + 1;
-
-	if ((pLib->cur_parse_entry + MODEM_PARSE_ENTRIES) >= pLib->parse_entries) {
-		diva_trace_error(pLib, -1, __FILE__, __LINE__);
-		return;
-	}
-	pModem->ChannelNumber = nr;
-
-	pLib->modem_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Event", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Event;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Norm", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Norm;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Options", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Options;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\TX Speed", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->TxSpeed;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\RX Speed", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxSpeed;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Roundtrip ms", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RoundtripMsec;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Symbol Rate", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SymbolRate;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\RX Level dBm", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxLeveldBm;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Echo Level dBm", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->EchoLeveldBm;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\SNR dB", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SNRdb;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\MAE", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->MAE;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Local Retrains", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalRetrains;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Remote Retrains", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteRetrains;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Local Resyncs", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalResyncs;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Remote Resyncs", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteResyncs;
-
-	sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
-		"State\\B%d\\Modem\\Disc Reason", nr);
-	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->DiscReason;
-
-	pLib->modem_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
-
-static void diva_create_parse_table(diva_strace_context_t *pLib) {
-	int i;
-
-	for (i = 0; i < pLib->Channels; i++) {
-		diva_create_line_parse_table(pLib, i);
-		diva_create_modem_parse_table(pLib, i);
-		diva_create_fax_parse_table(pLib, i);
-	}
-
-	pLib->statistic_parse_first = pLib->cur_parse_entry;
-
-	/*
-	  Outgoing Calls
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\Calls");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.Calls;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\Connected");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.Connected;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\User Busy");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.User_Busy;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\No Answer");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.No_Answer;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\Wrong Number");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.Wrong_Number;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\Call Rejected");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.Call_Rejected;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Outgoing Calls\\Other Failures");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.outg.Other_Failures;
-
-	/*
-	  Incoming Calls
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Calls");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Calls;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Connected");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Connected;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\User Busy");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.User_Busy;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Call Rejected");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Call_Rejected;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Wrong Number");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Wrong_Number;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Incompatible Dst");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Incompatible_Dst;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Out of Order");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Out_of_Order;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Incoming Calls\\Ignored");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.inc.Ignored;
-
-	/*
-	  Modem Statistics
-	*/
-	pLib->mdm_statistic_parse_first = pLib->cur_parse_entry;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Normal");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Normal;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Unspecified");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Unspecified;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Busy Tone");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Busy_Tone;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Congestion");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Congestion;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Carr. Wait");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Carr_Wait;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Trn Timeout");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Trn_Timeout;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Incompat.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Incompat;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc Frame Rej.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_Frame_Rej;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\Modem\\Disc V42bis");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.mdm.Disc_V42bis;
-
-	pLib->mdm_statistic_parse_last  = pLib->cur_parse_entry - 1;
-
-	/*
-	  Fax Statistics
-	*/
-	pLib->fax_statistic_parse_first = pLib->cur_parse_entry;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Normal");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Normal;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Not Ident.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Not_Ident;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc No Response");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_No_Response;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Retries");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Retries;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Unexp. Msg.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Unexp_Msg;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc No Polling.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_No_Polling;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Training");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Training;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Unexpected");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Unexpected;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Application");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Application;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Incompat.");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Incompat;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc No Command");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_No_Command;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Long Msg");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Long_Msg;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Supervisor");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Supervisor;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc SUB SEP PWD");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_SUB_SEP_PWD;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Invalid Msg");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Invalid_Msg;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Page Coding");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Page_Coding;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc App Timeout");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_App_Timeout;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\FAX\\Disc Unspecified");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.fax.Disc_Unspecified;
-
-	pLib->fax_statistic_parse_last  = pLib->cur_parse_entry - 1;
-
-	/*
-	  B-Layer1
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\X-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.X_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\X-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.X_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\X-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.X_Errors;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\R-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.R_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\R-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.R_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer1\\R-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b1.R_Errors;
-
-	/*
-	  B-Layer2
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\X-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.X_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\X-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.X_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\X-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.X_Errors;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\R-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.R_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\R-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.R_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\B-Layer2\\R-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.b2.R_Errors;
-
-	/*
-	  D-Layer1
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\X-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.X_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\X-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.X_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\X-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.X_Errors;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\R-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.R_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\R-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.R_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer1\\R-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d1.R_Errors;
-
-	/*
-	  D-Layer2
-	*/
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\X-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.X_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\X-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.X_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\X-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.X_Errors;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\R-Frames");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.R_Frames;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\R-Bytes");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.R_Bytes;
-
-	strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
-	       "Statistics\\D-Layer2\\R-Errors");
-	pLib->parse_table[pLib->cur_parse_entry++].variable = \
-		&pLib->InterfaceStat.d2.R_Errors;
-
-
-	pLib->statistic_parse_last  = pLib->cur_parse_entry - 1;
-}
-
-static void diva_trace_error(diva_strace_context_t *pLib,
-			     int error, const char *file, int line) {
-	if (pLib->user_proc_table.error_notify_proc) {
-		(*(pLib->user_proc_table.error_notify_proc))(\
-			pLib->user_proc_table.user_context,
-			&pLib->instance, pLib->Adapter,
-			error, file, line);
-	}
-}
-
-/*
-  Deliver a notification to the user
-*/
-static void diva_trace_notify_user(diva_strace_context_t *pLib,
-				   int Channel,
-				   int notify_subject) {
-	if (pLib->user_proc_table.notify_proc) {
-		(*(pLib->user_proc_table.notify_proc))(pLib->user_proc_table.user_context,
-						       &pLib->instance,
-						       pLib->Adapter,
-						       &pLib->lines[Channel],
-						       notify_subject);
-	}
-}
-
-/*
-  Read the variable value to its destination based on the variable type
-*/
-static int diva_trace_read_variable(diva_man_var_header_t *pVar,
-				    void *variable) {
-	switch (pVar->type) {
-	case 0x03: /* MI_ASCIIZ - string                               */
-		return (diva_strace_read_asz(pVar, (char *)variable));
-	case 0x04: /* MI_ASCII  - string                               */
-		return (diva_strace_read_asc(pVar, (char *)variable));
-	case 0x05: /* MI_NUMBER - counted sequence of bytes            */
-		return (diva_strace_read_ie(pVar, (diva_trace_ie_t *)variable));
-	case 0x81: /* MI_INT    - signed integer                       */
-		return (diva_strace_read_int(pVar, (int *)variable));
-	case 0x82: /* MI_UINT   - unsigned integer                     */
-		return (diva_strace_read_uint(pVar, (dword *)variable));
-	case 0x83: /* MI_HINT   - unsigned integer, hex representation */
-		return (diva_strace_read_uint(pVar, (dword *)variable));
-	case 0x87: /* MI_BITFLD - unsigned integer, bit representation */
-		return (diva_strace_read_uint(pVar, (dword *)variable));
-	}
-
-	/*
-	  This type of variable is not handled; indicate an error.
-	  Either there is a problem in the management interface or in the
-	  application recoding table, or this application should handle it.
-	*/
-	return (-1);
-}
-
-/*
-  Read signed integer to destination
-*/
-static int diva_strace_read_int(diva_man_var_header_t *pVar, int *var) {
-	byte *ptr = (char *)&pVar->path_length;
-	int value;
-
-	ptr += (pVar->path_length + 1);
-
-	switch (pVar->value_length) {
-	case 1:
-		value = *(char *)ptr;
-		break;
-
-	case 2:
-		value = (short)GET_WORD(ptr);
-		break;
-
-	case 4:
-		value = (int)GET_DWORD(ptr);
-		break;
-
-	default:
-		return (-1);
-	}
-
-	*var = value;
-
-	return (0);
-}
-
-static int diva_strace_read_uint(diva_man_var_header_t *pVar, dword *var) {
-	byte *ptr = (char *)&pVar->path_length;
-	dword value;
-
-	ptr += (pVar->path_length + 1);
-
-	switch (pVar->value_length) {
-	case 1:
-		value = (byte)(*ptr);
-		break;
-
-	case 2:
-		value = (word)GET_WORD(ptr);
-		break;
-
-	case 3:
-		value  = (dword)GET_DWORD(ptr);
-		value &= 0x00ffffff;
-		break;
-
-	case 4:
-		value = (dword)GET_DWORD(ptr);
-		break;
-
-	default:
-		return (-1);
-	}
-
-	*var = value;
-
-	return (0);
-}
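The unsigned decode above selects the width from value_length; for the 3-byte case the driver reads a full dword via GET_DWORD() and masks it down to 24 bits. A sketch of the same decode that assembles the bytes by hand instead of using the driver's macros (little-endian order is assumed here, as GET_WORD()/GET_DWORD() imply):

#include <stdio.h>

typedef unsigned int dword;

static dword decode_uint(const unsigned char *p, int value_length)
{
	switch (value_length) {
	case 1:
		return p[0];
	case 2:
		return p[0] | (p[1] << 8);
	case 3:
		/* the driver masks a 4-byte read to 24 bits; assembling
		 * three bytes directly yields the same result */
		return p[0] | (p[1] << 8) | ((dword)p[2] << 16);
	case 4:
		return p[0] | (p[1] << 8) | ((dword)p[2] << 16) |
		       ((dword)p[3] << 24);
	default:
		return 0;	/* the driver reports -1 for other lengths */
	}
}

int main(void)
{
	const unsigned char three[] = { 0x34, 0x12, 0x7f };

	printf("0x%06x\n", decode_uint(three, 3));	/* 0x7f1234 */
	return 0;
}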
-
-/*
-  Read zero terminated ASCII string
-*/
-static int diva_strace_read_asz(diva_man_var_header_t *pVar, char *var) {
-	char *ptr = (char *)&pVar->path_length;
-	int length;
-
-	ptr += (pVar->path_length + 1);
-
-	if (!(length = pVar->value_length)) {
-		length = strlen(ptr);
-	}
-	memcpy(var, ptr, length);
-	var[length] = 0;
-
-	return (0);
-}
-
-/*
-  Read counted (with leading length byte) ASCII string
-*/
-static int diva_strace_read_asc(diva_man_var_header_t *pVar, char *var) {
-	char *ptr = (char *)&pVar->path_length;
-
-	ptr += (pVar->path_length + 1);
-	memcpy(var, ptr + 1, *ptr);
-	var[(int)*ptr] = 0;
-
-	return (0);
-}
-
-/*
-  Read one information element - i.e. one string of byte values with
-  one length byte in front
-*/
-static int diva_strace_read_ie(diva_man_var_header_t *pVar,
-			       diva_trace_ie_t *var) {
-	char *ptr = (char *)&pVar->path_length;
-
-	ptr += (pVar->path_length + 1);
-
-	var->length = *ptr;
-	memcpy(&var->data[0], ptr + 1, *ptr);
-
-	return (0);
-}
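The three readers above differ only in where the length comes from: diva_strace_read_asz() uses value_length (or strlen() when it is zero), while diva_strace_read_asc() and diva_strace_read_ie() take a leading length byte from the data itself. A small sketch of the counted form with an invented buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* counted string: first byte is the length, no terminator on the wire */
	const unsigned char raw[] = { 5, 'H', 'e', 'l', 'l', 'o' };
	char out[16];

	memcpy(out, raw + 1, raw[0]);
	out[raw[0]] = 0;		/* add the terminator, as the driver does */
	printf("%s\n", out);		/* Hello */
	return 0;
}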
-
-static int SuperTraceSetAudioTap(void *hLib, int Channel, int on) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if ((Channel < 1) || (Channel > pLib->Channels)) {
-		return (-1);
-	}
-	Channel--;
-
-	if (on) {
-		pLib->audio_tap_mask |=  (1L << Channel);
-	} else {
-		pLib->audio_tap_mask &= ~(1L << Channel);
-	}
-
-	/*
-	  EYE patterns have TM_M_DATA set as additional
-	  condition
-	*/
-	if (pLib->audio_tap_mask) {
-		pLib->trace_event_mask |= TM_M_DATA;
-	} else {
-		pLib->trace_event_mask &= ~TM_M_DATA;
-	}
-
-	return (ScheduleNextTraceRequest(pLib));
-}
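Channels are 1-based at this API boundary and converted to a 0-based bit position before the mask is updated; the B-channel and clear-call masks below follow the same pattern. A minimal sketch of that bit bookkeeping (the mask width and channel number here are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0;
	int channel = 2;			/* 1-based, as in the API */

	mask |=  (1UL << (channel - 1));	/* enable the channel  */
	printf("after enable:  0x%lx\n", mask);

	mask &= ~(1UL << (channel - 1));	/* disable the channel */
	printf("after disable: 0x%lx\n", mask);
	return 0;
}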
-
-static int SuperTraceSetBChannel(void *hLib, int Channel, int on) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if ((Channel < 1) || (Channel > pLib->Channels)) {
-		return (-1);
-	}
-	Channel--;
-
-	if (on) {
-		pLib->bchannel_trace_mask |=  (1L << Channel);
-	} else {
-		pLib->bchannel_trace_mask &= ~(1L << Channel);
-	}
-
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceSetDChannel(void *hLib, int on) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if (on) {
-		pLib->trace_event_mask |= (TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
-	} else {
-		pLib->trace_event_mask &= ~(TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
-	}
-
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceSetInfo(void *hLib, int on) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if (on) {
-		pLib->trace_event_mask |= TM_STRING;
-	} else {
-		pLib->trace_event_mask &= ~TM_STRING;
-	}
-
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceClearCall(void *hLib, int Channel) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
-	if ((Channel < 1) || (Channel > pLib->Channels)) {
-		return (-1);
-	}
-	Channel--;
-
-	pLib->clear_call_command |= (1L << Channel);
-
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-/*
-  Parse and update cumulative statistics
-*/
-static int diva_ifc_statistics(diva_strace_context_t *pLib,
-			       diva_man_var_header_t *pVar) {
-	diva_man_var_header_t *cur;
-	int i, one_updated = 0, mdm_updated = 0, fax_updated = 0;
-
-	for (i  = pLib->statistic_parse_first; i <= pLib->statistic_parse_last; i++) {
-		if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
-			if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
-				diva_trace_error(pLib, -3 , __FILE__, __LINE__);
-				return (-1);
-			}
-			one_updated = 1;
-			if ((i >= pLib->mdm_statistic_parse_first) && (i <= pLib->mdm_statistic_parse_last)) {
-				mdm_updated = 1;
-			}
-			if ((i >= pLib->fax_statistic_parse_first) && (i <= pLib->fax_statistic_parse_last)) {
-				fax_updated = 1;
-			}
-		}
-	}
-
-	/*
-	  We do not use the first event to notify the user - this is the event
-	  that is generated as a result of the EVENT ON operation and is used
-	  only to initialize internal variables of the application
-	*/
-	if (mdm_updated) {
-		diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE);
-	} else if (fax_updated) {
-		diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE);
-	} else if (one_updated) {
-		diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE);
-	}
-
-	return (one_updated ? 0 : -1);
-}
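Because the modem and fax statistics were appended to the parse table as contiguous blocks, comparing the loop index against the stored first/last markers is enough to tell which group changed. A toy illustration of that range bracketing (the index values are invented):

#include <stdio.h>

int main(void)
{
	/* invented layout: 0..7 generic, 8..11 modem, 12..15 fax statistics */
	int mdm_first = 8, mdm_last = 11;
	int fax_first = 12, fax_last = 15;
	int i = 10;

	if (i >= mdm_first && i <= mdm_last)
		printf("entry %d updates the modem statistics\n", i);
	else if (i >= fax_first && i <= fax_last)
		printf("entry %d updates the fax statistics\n", i);
	else
		printf("entry %d updates the generic statistics\n", i);
	return 0;
}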
-
-static int SuperTraceGetOutgoingCallStatistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->outgoing_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetIncomingCallStatistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->incoming_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetModemStatistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->modem_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetFaxStatistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->fax_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetBLayer1Statistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->b1_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetBLayer2Statistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->b2_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetDLayer1Statistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->d1_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetDLayer2Statistics(void *hLib) {
-	diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-	pLib->d2_ifc_stats = 1;
-	return (ScheduleNextTraceRequest(pLib));
-}
-
-dword DivaSTraceGetMemotyRequirement(int channels) {
-	dword parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES + \
-			       STAT_PARSE_ENTRIES + \
-			       LINE_PARSE_ENTRIES + 1) * channels;
-	return (sizeof(diva_strace_context_t) + \
-		(parse_entries * sizeof(diva_strace_path2action_t)));
-}
diff --git a/drivers/isdn/hardware/eicon/maintidi.h b/drivers/isdn/hardware/eicon/maintidi.h
deleted file mode 100644
index 2b46147..0000000
--- a/drivers/isdn/hardware/eicon/maintidi.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_EICON_TRACE_IDI_IFC_H__
-#define __DIVA_EICON_TRACE_IDI_IFC_H__
-
-void *SuperTraceOpenAdapter(int AdapterNumber);
-int SuperTraceCloseAdapter(void *AdapterHandle);
-int SuperTraceWrite(void *AdapterHandle,
-		    const void *data, int length);
-int SuperTraceReadRequest(void *AdapterHandle, const char *name, byte *data);
-int SuperTraceGetNumberOfChannels(void *AdapterHandle);
-int SuperTraceASSIGN(void *AdapterHandle, byte *data);
-int SuperTraceREMOVE(void *AdapterHandle);
-int SuperTraceTraceOnRequest(void *hAdapter, const char *name, byte *data);
-int SuperTraceWriteVar(void *AdapterHandle,
-		       byte *data,
-		       const char *name,
-		       void *var,
-		       byte type,
-		       byte var_length);
-int SuperTraceExecuteRequest(void *AdapterHandle,
-			     const char *name,
-			     byte *data);
-
-typedef struct _diva_strace_path2action {
-	char path[64]; /* Full path to variable            */
-	void *variable; /* Variable that will receive value */
-} diva_strace_path2action_t;
-
-#define DIVA_MAX_MANAGEMENT_TRANSFER_SIZE 4096
-
-typedef struct _diva_strace_context {
-	diva_strace_library_interface_t	instance;
-
-	int Adapter;
-	void *hAdapter;
-
-	int Channels;
-	int req_busy;
-
-	ENTITY e;
-	IDI_CALL request;
-	BUFFERS XData;
-	BUFFERS RData;
-	byte buffer[DIVA_MAX_MANAGEMENT_TRANSFER_SIZE + 1];
-	int removal_state;
-	int general_b_ch_event;
-	int general_fax_event;
-	int general_mdm_event;
-
-	byte rc_ok;
-
-	/*
-	  Initialization request state machine
-	*/
-	int ChannelsTraceActive;
-	int ModemTraceActive;
-	int FaxTraceActive;
-	int IncomingCallsCallsActive;
-	int IncomingCallsConnectedActive;
-	int OutgoingCallsCallsActive;
-	int OutgoingCallsConnectedActive;
-
-	int trace_mask_init;
-	int audio_trace_init;
-	int bchannel_init;
-	int trace_length_init;
-	int	trace_on;
-	int trace_events_down;
-	int l1_trace;
-	int l2_trace;
-
-	/*
-	  Trace\Event Enable
-	*/
-	word trace_event_mask;
-	word current_trace_event_mask;
-
-	dword audio_tap_mask;
-	dword current_audio_tap_mask;
-	dword current_eye_pattern_mask;
-	int   audio_tap_pending;
-	int   eye_pattern_pending;
-
-	dword bchannel_trace_mask;
-	dword current_bchannel_trace_mask;
-
-
-	diva_trace_line_state_t lines[30];
-
-	int	parse_entries;
-	int	cur_parse_entry;
-	diva_strace_path2action_t *parse_table;
-
-	diva_trace_library_user_interface_t user_proc_table;
-
-	int line_parse_entry_first[30];
-	int line_parse_entry_last[30];
-
-	int modem_parse_entry_first[30];
-	int modem_parse_entry_last[30];
-
-	int fax_parse_entry_first[30];
-	int fax_parse_entry_last[30];
-
-	int statistic_parse_first;
-	int statistic_parse_last;
-
-	int mdm_statistic_parse_first;
-	int mdm_statistic_parse_last;
-
-	int fax_statistic_parse_first;
-	int fax_statistic_parse_last;
-
-	dword	line_init_event;
-	dword	modem_init_event;
-	dword	fax_init_event;
-
-	dword	pending_line_status;
-	dword	pending_modem_status;
-	dword	pending_fax_status;
-
-	dword clear_call_command;
-
-	int outgoing_ifc_stats;
-	int incoming_ifc_stats;
-	int modem_ifc_stats;
-	int fax_ifc_stats;
-	int b1_ifc_stats;
-	int b2_ifc_stats;
-	int d1_ifc_stats;
-	int d2_ifc_stats;
-
-	diva_trace_interface_state_t Interface;
-	diva_ifc_statistics_t				 InterfaceStat;
-} diva_strace_context_t;
-
-typedef struct _diva_man_var_header {
-	byte   escape;
-	byte   length;
-	byte   management_id;
-	byte   type;
-	byte   attribute;
-	byte   status;
-	byte   value_length;
-	byte	 path_length;
-} diva_man_var_header_t;
-
-#endif
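The diva_man_var_header_t layout is exactly what the read helpers in maintidi.c rely on: the path string begins right after path_length (the last header byte) and the value begins right after the path. A sketch that builds one such element by hand and recovers both pieces (byte values are invented; all members are single bytes, so the overlay needs no packing tricks):

#include <stdio.h>
#include <string.h>

typedef unsigned char byte;

typedef struct _diva_man_var_header {
	byte escape;
	byte length;
	byte management_id;
	byte type;
	byte attribute;
	byte status;
	byte value_length;
	byte path_length;
} diva_man_var_header_t;

int main(void)
{
	/* hand-built element: 8-byte header, 5-byte path, 2-byte value */
	byte elem[sizeof(diva_man_var_header_t) + 5 + 2];
	diva_man_var_header_t *hdr = (diva_man_var_header_t *)elem;
	const char *path;
	const byte *value;

	memset(elem, 0, sizeof(elem));
	hdr->path_length  = 5;
	hdr->value_length = 2;
	memcpy(elem + sizeof(*hdr), "State", 5);

	path  = (const char *)&hdr->path_length + 1;	/* as the driver does */
	value = (const byte *)path + hdr->path_length;

	printf("path: %.*s, value length: %d\n",
	       hdr->path_length, path, hdr->value_length);
	(void)value;
	return 0;
}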
diff --git a/drivers/isdn/hardware/eicon/man_defs.h b/drivers/isdn/hardware/eicon/man_defs.h
deleted file mode 100644
index 249c471..0000000
--- a/drivers/isdn/hardware/eicon/man_defs.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/* Definitions for use with the Management Information Element      */
-
-/*------------------------------------------------------------------*/
-/* Management information element                                   */
-/* ----------------------------------------------------------       */
-/* Byte     Coding            Comment                               */
-/* ----------------------------------------------------------       */
-/*    0 | 0 1 1 1 1 1 1 1 | ESC                                     */
-/*    1 | 0 x x x x x x x | Length of information element (m-1)     */
-/*    2 | 1 0 0 0 0 0 0 0 | Management Information Id               */
-/*    3 | x x x x x x x x | Type                                    */
-/*    4 | x x x x x x x x | Attribute                               */
-/*    5 | x x x x x x x x | Status                                  */
-/*    6 | x x x x x x x x | Variable Value Length (m-n)             */
-/*    7 | x x x x x x x x | Path / Variable Name String Length (n-8)*/
-/* 8..n | x x x x x x x x | Path/Node Name String separated by '\'  */
-/* n..m | x x x x x x x x | Variable content                        */
-/*------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------*/
-/* Type Field                                                       */
-/*                                                                  */
-/* MAN_READ:      not used                                          */
-/* MAN_WRITE:     not used                                          */
-/* MAN_EVENT_ON:  not used                                          */
-/* MAN_EVENT_OFF: not used                                          */
-/* MAN_INFO_IND:  type of variable                                  */
-/* MAN_EVENT_IND: type of variable                                  */
-/* MAN_TRACE_IND  not used                                          */
-/*------------------------------------------------------------------*/
-#define MI_DIR          0x01  /* Directory string (zero terminated) */
-#define MI_EXECUTE      0x02  /* Executable function (has no value) */
-#define MI_ASCIIZ       0x03  /* Zero terminated string             */
-#define MI_ASCII        0x04  /* String, first byte is length       */
-#define MI_NUMBER       0x05  /* Number string, first byte is length*/
-#define MI_TRACE        0x06  /* Trace information, format see below*/
-
-#define MI_FIXED_LENGTH 0x80  /* get length from MAN_INFO max_len   */
-#define MI_INT          0x81  /* number to display as signed int    */
-#define MI_UINT         0x82  /* number to display as unsigned int  */
-#define MI_HINT         0x83  /* number to display in hex format    */
-#define MI_HSTR         0x84  /* number to display as a hex string  */
-#define MI_BOOLEAN      0x85  /* number to display as boolean       */
-#define MI_IP_ADDRESS   0x86  /* number to display as IP address    */
-#define MI_BITFLD       0x87  /* number to display as bit field     */
-#define MI_SPID_STATE   0x88  /* state# of SPID initialisation      */
-
-/*------------------------------------------------------------------*/
-/* Attribute Field                                                  */
-/*                                                                  */
-/* MAN_READ:      not used                                          */
-/* MAN_WRITE:     not used                                          */
-/* MAN_EVENT_ON:  not used                                          */
-/* MAN_EVENT_OFF: not used                                          */
-/* MAN_INFO_IND:  set according to capabilities of that variable    */
-/* MAN_EVENT_IND: not used                                          */
-/* MAN_TRACE_IND  not used                                          */
-/*------------------------------------------------------------------*/
-#define MI_WRITE        0x01  /* Variable is writeable              */
-#define MI_EVENT        0x02  /* Variable can indicate changes      */
-
-/*------------------------------------------------------------------*/
-/* Status Field                                                     */
-/*                                                                  */
-/* MAN_READ:      not used                                          */
-/* MAN_WRITE:     not used                                          */
-/* MAN_EVENT_ON:  not used                                          */
-/* MAN_EVENT_OFF: not used                                          */
-/* MAN_INFO_IND:  set according to the actual status                */
-/* MAN_EVENT_IND: set according to the actual status                */
-/* MAN_TRACE_IND  not used                                          */
-/*------------------------------------------------------------------*/
-#define MI_LOCKED       0x01  /* write protected by another instance*/
-#define MI_EVENT_ON     0x02  /* Event logging switched on          */
-#define MI_PROTECTED    0x04  /* write protected by this instance   */
-
-/*------------------------------------------------------------------*/
-/* Data Format used for MAN_TRACE_IND (no MI-element used)          */
-/*------------------------------------------------------------------*/
-typedef struct mi_xlog_hdr_s MI_XLOG_HDR;
-struct mi_xlog_hdr_s
-{
-	unsigned long  time;   /* Timestamp in msec units                 */
-	unsigned short size;   /* Size of data that follows               */
-	unsigned short code;   /* code of trace event                     */
-};                       /* unspecified data follows this header    */
-
-/*------------------------------------------------------------------*/
-/* Trace mask definitions for trace events except B channel and     */
-/* debug trace events                                               */
-/*------------------------------------------------------------------*/
-#define TM_D_CHAN   0x0001  /* D-Channel        (D-.) Code 3,4      */
-#define TM_L_LAYER  0x0002  /* Low Layer        (LL)  Code 6,7      */
-#define TM_N_LAYER  0x0004  /* Network Layer    (N)   Code 14,15    */
-#define TM_DL_ERR   0x0008  /* Data Link Error  (MDL) Code 9        */
-#define TM_LAYER1   0x0010  /* Layer 1                Code 20       */
-#define TM_C_COMM   0x0020  /* Call Comment     (SIG) Code 5,21,22  */
-#define TM_M_DATA   0x0040  /* Modulation Data  (EYE) Code 23       */
-#define TM_STRING   0x0080  /* String data            Code 24       */
-#define TM_N_USED2  0x0100  /* not used                             */
-#define TM_N_USED3  0x0200  /* not used                             */
-#define TM_N_USED4  0x0400  /* not used                             */
-#define TM_N_USED5  0x0800  /* not used                             */
-#define TM_N_USED6  0x1000  /* not used                             */
-#define TM_N_USED7  0x2000  /* not used                             */
-#define TM_N_USED8  0x4000  /* not used                             */
-#define TM_REST     0x8000  /* Codes 10,11,12,13,16,18,19,128,129   */
-
-/*------ End of file -----------------------------------------------*/
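The TM_* bits are meant to be OR-ed together into the word-sized trace event mask; enabling D-channel tracing, for instance, toggles the same group of bits that SuperTraceSetDChannel() uses. A tiny sketch (mask values copied from the definitions above):

#include <stdio.h>

#define TM_D_CHAN  0x0001
#define TM_DL_ERR  0x0008
#define TM_LAYER1  0x0010
#define TM_C_COMM  0x0020

int main(void)
{
	unsigned short mask = 0;

	/* the bit group SuperTraceSetDChannel() toggles */
	mask |= (TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
	printf("D-channel trace mask: 0x%04x\n", mask);	/* 0x0039 */

	mask &= ~(TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
	printf("after disable:        0x%04x\n", mask);	/* 0x0000 */
	return 0;
}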
diff --git a/drivers/isdn/hardware/eicon/mdm_msg.h b/drivers/isdn/hardware/eicon/mdm_msg.h
deleted file mode 100644
index 0e6b2e0..0000000
--- a/drivers/isdn/hardware/eicon/mdm_msg.h
+++ /dev/null
@@ -1,346 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __EICON_MDM_MSG_H__
-#define __EICON_MDM_MSG_H__
-#define DSP_UDATA_INDICATION_DCD_OFF  0x01
-#define DSP_UDATA_INDICATION_DCD_ON  0x02
-#define DSP_UDATA_INDICATION_CTS_OFF  0x03
-#define DSP_UDATA_INDICATION_CTS_ON  0x04
-/* =====================================================================
-   DCD_OFF Message:
-   <word> time of DCD off (sampled from counter at 8kHz)
-   DCD_ON Message:
-   <word> time of DCD on (sampled from counter at 8kHz)
-   <byte> connected norm
-   <word> connected options
-   <dword> connected speed (bit/s, max of tx and rx speed)
-   <word> roundtrip delay (ms)
-   <dword> connected speed tx (bit/s)
-   <dword> connected speed rx (bit/s)
-   Size of this message == 19 bytes, but we will receive only 11
-   ===================================================================== */
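The DCD_ON layout above totals 19 bytes, but as the note says only the first 11 arrive: <word> time, <byte> norm, <word> options, <dword> speed and <word> roundtrip delay. A hedged decode of those 11 bytes, assuming little-endian field order; the sample values are invented but consistent with the DSP_CONNECTED_* definitions below:

#include <stdio.h>

static unsigned int get_word(const unsigned char *p)
{
	return p[0] | (p[1] << 8);
}

static unsigned long get_dword(const unsigned char *p)
{
	return (unsigned long)p[0] | ((unsigned long)p[1] << 8) |
	       ((unsigned long)p[2] << 16) | ((unsigned long)p[3] << 24);
}

int main(void)
{
	/* invented 11-byte DCD_ON payload */
	const unsigned char msg[11] = {
		0x10, 0x27,		/* time (8kHz samples)            */
		0x06,			/* norm: DSP_CONNECTED_NORM_V34   */
		0x04, 0x00,		/* options: V.42 LAPM             */
		0x00, 0x7d, 0x00, 0x00,	/* speed: 32000 bit/s             */
		0x14, 0x00		/* roundtrip delay: 20 ms         */
	};

	printf("norm %u, speed %lu bit/s, roundtrip %u ms\n",
	       msg[2], get_dword(msg + 5), get_word(msg + 9));
	return 0;
}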
-#define DSP_CONNECTED_NORM_UNSPECIFIED      0
-#define DSP_CONNECTED_NORM_V21              1
-#define DSP_CONNECTED_NORM_V23              2
-#define DSP_CONNECTED_NORM_V22              3
-#define DSP_CONNECTED_NORM_V22_BIS          4
-#define DSP_CONNECTED_NORM_V32_BIS          5
-#define DSP_CONNECTED_NORM_V34              6
-#define DSP_CONNECTED_NORM_V8               7
-#define DSP_CONNECTED_NORM_BELL_212A        8
-#define DSP_CONNECTED_NORM_BELL_103         9
-#define DSP_CONNECTED_NORM_V29_LEASED_LINE  10
-#define DSP_CONNECTED_NORM_V33_LEASED_LINE  11
-#define DSP_CONNECTED_NORM_V90              12
-#define DSP_CONNECTED_NORM_V21_CH2          13
-#define DSP_CONNECTED_NORM_V27_TER          14
-#define DSP_CONNECTED_NORM_V29              15
-#define DSP_CONNECTED_NORM_V33              16
-#define DSP_CONNECTED_NORM_V17              17
-#define DSP_CONNECTED_NORM_V32              18
-#define DSP_CONNECTED_NORM_K56_FLEX         19
-#define DSP_CONNECTED_NORM_X2               20
-#define DSP_CONNECTED_NORM_V18              21
-#define DSP_CONNECTED_NORM_V18_LOW_HIGH     22
-#define DSP_CONNECTED_NORM_V18_HIGH_LOW     23
-#define DSP_CONNECTED_NORM_V21_LOW_HIGH     24
-#define DSP_CONNECTED_NORM_V21_HIGH_LOW     25
-#define DSP_CONNECTED_NORM_BELL103_LOW_HIGH 26
-#define DSP_CONNECTED_NORM_BELL103_HIGH_LOW 27
-#define DSP_CONNECTED_NORM_V23_75_1200      28
-#define DSP_CONNECTED_NORM_V23_1200_75      29
-#define DSP_CONNECTED_NORM_EDT_110          30
-#define DSP_CONNECTED_NORM_BAUDOT_45        31
-#define DSP_CONNECTED_NORM_BAUDOT_47        32
-#define DSP_CONNECTED_NORM_BAUDOT_50        33
-#define DSP_CONNECTED_NORM_DTMF             34
-#define DSP_CONNECTED_NORM_V18_RESERVED_13  35
-#define DSP_CONNECTED_NORM_V18_RESERVED_14  36
-#define DSP_CONNECTED_NORM_V18_RESERVED_15  37
-#define DSP_CONNECTED_NORM_VOWN             38
-#define DSP_CONNECTED_NORM_V23_OFF_HOOK     39
-#define DSP_CONNECTED_NORM_V23_ON_HOOK      40
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_3  41
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_4  42
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_5  43
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_6  44
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_7  45
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_8  46
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_9  47
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_10 48
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_11 49
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_12 50
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_13 51
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_14 52
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_15 53
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_16 54
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_17 55
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_18 56
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_19 57
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_20 58
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_21 59
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_22 60
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_23 61
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_24 62
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_25 63
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_26 64
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_27 65
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_28 66
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_29 67
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_30 68
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_31 69
-#define DSP_CONNECTED_OPTION_TRELLIS             0x0001
-#define DSP_CONNECTED_OPTION_V42_TRANS           0x0002
-#define DSP_CONNECTED_OPTION_V42_LAPM            0x0004
-#define DSP_CONNECTED_OPTION_SHORT_TRAIN         0x0008
-#define DSP_CONNECTED_OPTION_TALKER_ECHO_PROTECT 0x0010
-#define DSP_CONNECTED_OPTION_V42BIS              0x0020
-#define DSP_CONNECTED_OPTION_MNP2                0x0040
-#define DSP_CONNECTED_OPTION_MNP3                0x0080
-#define DSP_CONNECTED_OPTION_MNP4                0x00c0
-#define DSP_CONNECTED_OPTION_MNP5                0x0100
-#define DSP_CONNECTED_OPTION_MNP10               0x0200
-#define DSP_CONNECTED_OPTION_MASK_V42            0x0024
-#define DSP_CONNECTED_OPTION_MASK_MNP            0x03c0
-#define DSP_CONNECTED_OPTION_MASK_ERROR_CORRECT  0x03e4
-#define DSP_CONNECTED_OPTION_MASK_COMPRESSION    0x0320
-#define DSP_UDATA_INDICATION_DISCONNECT         5
-/*
-  returns:
-  <byte> cause
-*/
-/* ==========================================================
-   DLC: B2 modem configuration
-   ========================================================== */
-/*
-  Fields in assign DLC information element for modem protocol V.42/MNP:
-  <byte> length of information element
-  <word> information field length
-  <byte> address A       (not used, default 3)
-  <byte> address B       (not used, default 1)
-  <byte> modulo mode     (not used, default 7)
-  <byte> window size     (not used, default 7)
-  <word> XID length      (not used, default 0)
-  ...    XID information (not used, default empty)
-  <byte> modem protocol negotiation options
-  <byte> modem protocol options
-  <byte> modem protocol break configuration
-  <byte> modem protocol application options
-*/
-#define DLC_MODEMPROT_DISABLE_V42_V42BIS     0x01
-#define DLC_MODEMPROT_DISABLE_MNP_MNP5       0x02
-#define DLC_MODEMPROT_REQUIRE_PROTOCOL       0x04
-#define DLC_MODEMPROT_DISABLE_V42_DETECT     0x08
-#define DLC_MODEMPROT_DISABLE_COMPRESSION    0x10
-#define DLC_MODEMPROT_REQUIRE_PROTOCOL_V34UP 0x20
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_1200    0x01
-#define DLC_MODEMPROT_BUFFER_IN_V42_DETECT   0x02
-#define DLC_MODEMPROT_DISABLE_V42_SREJ       0x04
-#define DLC_MODEMPROT_DISABLE_MNP3           0x08
-#define DLC_MODEMPROT_DISABLE_MNP4           0x10
-#define DLC_MODEMPROT_DISABLE_MNP10          0x20
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_V22BIS  0x40
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_V32BIS  0x80
-#define DLC_MODEMPROT_BREAK_DISABLED         0x00
-#define DLC_MODEMPROT_BREAK_NORMAL           0x01
-#define DLC_MODEMPROT_BREAK_EXPEDITED        0x02
-#define DLC_MODEMPROT_BREAK_DESTRUCTIVE      0x03
-#define DLC_MODEMPROT_BREAK_CONFIG_MASK      0x03
-#define DLC_MODEMPROT_APPL_EARLY_CONNECT     0x01
-#define DLC_MODEMPROT_APPL_PASS_INDICATIONS  0x02
-/* ==========================================================
-   CAI parameters used for the modem L1 configuration
-   ========================================================== */
-/*
-  Fields in assign CAI information element:
-  <byte> length of information element
-  <byte> info field and B-channel hardware
-  <byte> rate adaptation bit rate
-  <byte> async framing parameters
-  <byte> reserved
-  <word> packet length
-  <byte> modem line taking options
-  <byte> modem modulation negotiation parameters
-  <byte> modem modulation options
-  <byte> modem disabled modulations mask low
-  <byte> modem disabled modulations mask high
-  <byte> modem enabled modulations mask
-  <word> modem min TX speed
-  <word> modem max TX speed
-  <word> modem min RX speed
-  <word> modem max RX speed
-  <byte> modem disabled symbol rates mask
-  <byte> modem info options mask
-  <byte> modem transmit level adjust
-  <byte> modem speaker parameters
-  <word> modem private debug config
-  <struct> modem reserved
-  <struct> v18 config parameters
-  <struct> v18 probing sequence
-  <struct> v18 probing message
-*/
-#define DSP_CAI_HARDWARE_HDLC_64K          0x05
-#define DSP_CAI_HARDWARE_HDLC_56K          0x08
-#define DSP_CAI_HARDWARE_TRANSP            0x09
-#define DSP_CAI_HARDWARE_V110_SYNC         0x0c
-#define DSP_CAI_HARDWARE_V110_ASYNC        0x0d
-#define DSP_CAI_HARDWARE_HDLC_128K         0x0f
-#define DSP_CAI_HARDWARE_FAX               0x10
-#define DSP_CAI_HARDWARE_MODEM_ASYNC       0x11
-#define DSP_CAI_HARDWARE_MODEM_SYNC        0x12
-#define DSP_CAI_HARDWARE_V110_HDLCA        0x13
-#define DSP_CAI_HARDWARE_ADVANCED_VOICE    0x14
-#define DSP_CAI_HARDWARE_TRANSP_DTMF       0x16
-#define DSP_CAI_HARDWARE_DTMF_VOICE_ISDN   0x17
-#define DSP_CAI_HARDWARE_DTMF_VOICE_LOCAL  0x18
-#define DSP_CAI_HARDWARE_MASK              0x3f
-#define DSP_CAI_ENABLE_INFO_INDICATIONS    0x80
-#define DSP_CAI_RATE_ADAPTATION_300        0x00
-#define DSP_CAI_RATE_ADAPTATION_600        0x01
-#define DSP_CAI_RATE_ADAPTATION_1200       0x02
-#define DSP_CAI_RATE_ADAPTATION_2400       0x03
-#define DSP_CAI_RATE_ADAPTATION_4800       0x04
-#define DSP_CAI_RATE_ADAPTATION_9600       0x05
-#define DSP_CAI_RATE_ADAPTATION_19200      0x06
-#define DSP_CAI_RATE_ADAPTATION_38400      0x07
-#define DSP_CAI_RATE_ADAPTATION_48000      0x08
-#define DSP_CAI_RATE_ADAPTATION_56000      0x09
-#define DSP_CAI_RATE_ADAPTATION_7200       0x0a
-#define DSP_CAI_RATE_ADAPTATION_14400      0x0b
-#define DSP_CAI_RATE_ADAPTATION_28800      0x0c
-#define DSP_CAI_RATE_ADAPTATION_12000      0x0d
-#define DSP_CAI_RATE_ADAPTATION_1200_75    0x0e
-#define DSP_CAI_RATE_ADAPTATION_75_1200    0x0f
-#define DSP_CAI_RATE_ADAPTATION_MASK       0x0f
-#define DSP_CAI_ASYNC_PARITY_ENABLE        0x01
-#define DSP_CAI_ASYNC_PARITY_SPACE         0x00
-#define DSP_CAI_ASYNC_PARITY_ODD           0x02
-#define DSP_CAI_ASYNC_PARITY_EVEN          0x04
-#define DSP_CAI_ASYNC_PARITY_MARK          0x06
-#define DSP_CAI_ASYNC_PARITY_MASK          0x06
-#define DSP_CAI_ASYNC_ONE_STOP_BIT         0x00
-#define DSP_CAI_ASYNC_TWO_STOP_BITS        0x20
-#define DSP_CAI_ASYNC_CHAR_LENGTH_8        0x00
-#define DSP_CAI_ASYNC_CHAR_LENGTH_7        0x40
-#define DSP_CAI_ASYNC_CHAR_LENGTH_6        0x80
-#define DSP_CAI_ASYNC_CHAR_LENGTH_5        0xc0
-#define DSP_CAI_ASYNC_CHAR_LENGTH_MASK     0xc0
-#define DSP_CAI_MODEM_LEASED_LINE_MODE     0x01
-#define DSP_CAI_MODEM_4_WIRE_OPERATION     0x02
-#define DSP_CAI_MODEM_DISABLE_BUSY_DETECT  0x04
-#define DSP_CAI_MODEM_DISABLE_CALLING_TONE 0x08
-#define DSP_CAI_MODEM_DISABLE_ANSWER_TONE  0x10
-#define DSP_CAI_MODEM_ENABLE_DIAL_TONE_DET 0x20
-#define DSP_CAI_MODEM_USE_POTS_INTERFACE   0x40
-#define DSP_CAI_MODEM_FORCE_RAY_TAYLOR_FAX 0x80
-#define DSP_CAI_MODEM_NEGOTIATE_HIGHEST    0x00
-#define DSP_CAI_MODEM_NEGOTIATE_DISABLED   0x01
-#define DSP_CAI_MODEM_NEGOTIATE_IN_CLASS   0x02
-#define DSP_CAI_MODEM_NEGOTIATE_V100       0x03
-#define DSP_CAI_MODEM_NEGOTIATE_V8         0x04
-#define DSP_CAI_MODEM_NEGOTIATE_V8BIS      0x05
-#define DSP_CAI_MODEM_NEGOTIATE_MASK       0x07
-#define DSP_CAI_MODEM_GUARD_TONE_NONE      0x00
-#define DSP_CAI_MODEM_GUARD_TONE_550HZ     0x40
-#define DSP_CAI_MODEM_GUARD_TONE_1800HZ    0x80
-#define DSP_CAI_MODEM_GUARD_TONE_MASK      0xc0
-#define DSP_CAI_MODEM_DISABLE_RETRAIN      0x01
-#define DSP_CAI_MODEM_DISABLE_STEPUPDOWN   0x02
-#define DSP_CAI_MODEM_DISABLE_SPLIT_SPEED  0x04
-#define DSP_CAI_MODEM_DISABLE_TRELLIS      0x08
-#define DSP_CAI_MODEM_ALLOW_RDL_TEST_LOOP  0x10
-#define DSP_CAI_MODEM_DISABLE_FLUSH_TIMER  0x40
-#define DSP_CAI_MODEM_REVERSE_DIRECTION    0x80
-#define DSP_CAI_MODEM_DISABLE_V21          0x01
-#define DSP_CAI_MODEM_DISABLE_V23          0x02
-#define DSP_CAI_MODEM_DISABLE_V22          0x04
-#define DSP_CAI_MODEM_DISABLE_V22BIS       0x08
-#define DSP_CAI_MODEM_DISABLE_V32          0x10
-#define DSP_CAI_MODEM_DISABLE_V32BIS       0x20
-#define DSP_CAI_MODEM_DISABLE_V34          0x40
-#define DSP_CAI_MODEM_DISABLE_V90          0x80
-#define DSP_CAI_MODEM_DISABLE_BELL103      0x01
-#define DSP_CAI_MODEM_DISABLE_BELL212A     0x02
-#define DSP_CAI_MODEM_DISABLE_VFC          0x04
-#define DSP_CAI_MODEM_DISABLE_K56FLEX      0x08
-#define DSP_CAI_MODEM_DISABLE_X2           0x10
-#define DSP_CAI_MODEM_ENABLE_V29FDX        0x01
-#define DSP_CAI_MODEM_ENABLE_V33           0x02
-#define DSP_CAI_MODEM_DISABLE_2400_SYMBOLS 0x01
-#define DSP_CAI_MODEM_DISABLE_2743_SYMBOLS 0x02
-#define DSP_CAI_MODEM_DISABLE_2800_SYMBOLS 0x04
-#define DSP_CAI_MODEM_DISABLE_3000_SYMBOLS 0x08
-#define DSP_CAI_MODEM_DISABLE_3200_SYMBOLS 0x10
-#define DSP_CAI_MODEM_DISABLE_3429_SYMBOLS 0x20
-#define DSP_CAI_MODEM_DISABLE_TX_REDUCTION 0x01
-#define DSP_CAI_MODEM_DISABLE_PRECODING    0x02
-#define DSP_CAI_MODEM_DISABLE_PREEMPHASIS  0x04
-#define DSP_CAI_MODEM_DISABLE_SHAPING      0x08
-#define DSP_CAI_MODEM_DISABLE_NONLINEAR_EN 0x10
-#define DSP_CAI_MODEM_SPEAKER_OFF          0x00
-#define DSP_CAI_MODEM_SPEAKER_DURING_TRAIN 0x01
-#define DSP_CAI_MODEM_SPEAKER_TIL_CONNECT  0x02
-#define DSP_CAI_MODEM_SPEAKER_ALWAYS_ON    0x03
-#define DSP_CAI_MODEM_SPEAKER_CONTROL_MASK 0x03
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MIN   0x00
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_LOW   0x04
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_HIGH  0x08
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MAX   0x0c
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MASK  0x0c
-/* ==========================================================
-   DCD/CTS State
-   ========================================================== */
-#define MDM_WANT_CONNECT_B3_ACTIVE_I  0x01
-#define MDM_NCPI_VALID                0x02
-#define MDM_NCPI_CTS_ON_RECEIVED      0x04
-#define MDM_NCPI_DCD_ON_RECEIVED      0x08
-/* ==========================================================
-   CAPI NCPI Constants
-   ========================================================== */
-#define MDM_NCPI_ECM_V42              0x0001
-#define MDM_NCPI_ECM_MNP              0x0002
-#define MDM_NCPI_TRANSPARENT          0x0004
-#define MDM_NCPI_COMPRESSED           0x0010
-/* ==========================================================
-   CAPI B2 Config Constants
-   ========================================================== */
-#define MDM_B2_DISABLE_V42bis         0x0001
-#define MDM_B2_DISABLE_MNP            0x0002
-#define MDM_B2_DISABLE_TRANS          0x0004
-#define MDM_B2_DISABLE_V42            0x0008
-#define MDM_B2_DISABLE_COMP           0x0010
-/* ==========================================================
-   CAPI B1 Config Constants
-   ========================================================== */
-#define MDM_CAPI_DISABLE_RETRAIN      0x0001
-#define MDM_CAPI_DISABLE_RING_TONE    0x0002
-#define MDM_CAPI_GUARD_1800           0x0004
-#define MDM_CAPI_GUARD_550            0x0008
-#define MDM_CAPI_NEG_V8               0x0003
-#define MDM_CAPI_NEG_V100             0x0002
-#define MDM_CAPI_NEG_MOD_CLASS        0x0001
-#define MDM_CAPI_NEG_DISABLED         0x0000
-#endif
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
deleted file mode 100644
index def7992..0000000
--- a/drivers/isdn/hardware/eicon/message.c
+++ /dev/null
@@ -1,14954 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/bitmap.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "mdm_msg.h"
-#include "divasync.h"
-
-#define FILE_ "MESSAGE.C"
-#define dprintf
-
-/*------------------------------------------------------------------*/
-/* These are options supported for all adapters that are served by  */
-/* the XDI driver. Also it is not necessary to ask every adapter    */
-/* for them, and it is not necessary to save them separately for    */
-/* every adapter. Macros defined here have only local meaning.      */
-/*------------------------------------------------------------------*/
-static dword diva_xdi_extended_features = 0;
-
-#define DIVA_CAPI_USE_CMA                 0x00000001
-#define DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR  0x00000002
-#define DIVA_CAPI_XDI_PROVIDES_NO_CANCEL  0x00000004
-#define DIVA_CAPI_XDI_PROVIDES_RX_DMA     0x00000008
-
-/*
-  CAPI can request to process all return codes itself only if:
-  protocol code supports this && xdi supports this
-*/
-#define DIVA_CAPI_SUPPORTS_NO_CANCEL(__a__)   (((__a__)->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) && ((__a__)->manufacturer_features & MANUFACTURER_FEATURE_OK_FC_LABEL) && (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_NO_CANCEL))
-
-/*------------------------------------------------------------------*/
-/* local function prototypes                                        */
-/*------------------------------------------------------------------*/
-
-static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci);
-void AutomaticLaw(DIVA_CAPI_ADAPTER *);
-word CapiRelease(word);
-word CapiRegister(word);
-word api_put(APPL *, CAPI_MSG *);
-static word api_parse(byte *, word, byte *, API_PARSE *);
-static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out);
-static void api_load_msg(API_SAVE *in, API_PARSE *out);
-
-word api_remove_start(void);
-void api_remove_complete(void);
-
-static void plci_remove(PLCI *);
-static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a);
-static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *, IDI_SYNC_REQ *);
-
-void callback(ENTITY *);
-
-static void control_rc(PLCI *, byte, byte, byte, byte, byte);
-static void data_rc(PLCI *, byte);
-static void data_ack(PLCI *, byte);
-static void sig_ind(PLCI *);
-static void SendInfo(PLCI *, dword, byte **, byte);
-static void SendSetupInfo(APPL *, PLCI *, dword, byte **, byte);
-static void SendSSExtInd(APPL *, PLCI *plci, dword Id, byte **parms);
-
-static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms);
-
-static void nl_ind(PLCI *);
-
-static byte connect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte listen_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte info_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte info_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte alert_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte facility_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte facility_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte data_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte data_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte reset_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte reset_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_t90_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte select_b_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte manufacturer_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte manufacturer_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-
-static word get_plci(DIVA_CAPI_ADAPTER *);
-static void add_p(PLCI *, byte, byte *);
-static void add_s(PLCI *plci, byte code, API_PARSE *p);
-static void add_ss(PLCI *plci, byte code, API_PARSE *p);
-static void add_ie(PLCI *plci, byte code, byte *p, word p_length);
-static void add_d(PLCI *, word, byte *);
-static void add_ai(PLCI *, API_PARSE *);
-static word add_b1(PLCI *, API_PARSE *, word, word);
-static word add_b23(PLCI *, API_PARSE *);
-static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms);
-static void sig_req(PLCI *, byte, byte);
-static void nl_req_ncci(PLCI *, byte, byte);
-static void send_req(PLCI *);
-static void send_data(PLCI *);
-static word plci_remove_check(PLCI *);
-static void listen_check(DIVA_CAPI_ADAPTER *);
-static byte AddInfo(byte **, byte **, byte *, byte *);
-static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, const word *, byte **, byte);
-static byte ie_compare(byte *, byte *);
-static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
-static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
-
-/*
-  XON protocol helpers
-*/
-static void channel_flow_control_remove(PLCI *plci);
-static void channel_x_off(PLCI *plci, byte ch, byte flag);
-static void channel_x_on(PLCI *plci, byte ch);
-static void channel_request_xon(PLCI *plci, byte ch);
-static void channel_xmit_xon(PLCI *plci);
-static int channel_can_xon(PLCI *plci, byte ch);
-static void channel_xmit_extended_xon(PLCI *plci);
-
-static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, dword info_mask, byte setupParse);
-static word AdvCodecSupport(DIVA_CAPI_ADAPTER *, PLCI *, APPL *, byte);
-static void CodecIdCheck(DIVA_CAPI_ADAPTER *, PLCI *);
-static void SetVoiceChannel(PLCI *, byte *, DIVA_CAPI_ADAPTER *);
-static void VoiceChannelOff(PLCI *plci);
-static void adv_voice_write_coefs(PLCI *plci, word write_command);
-static void adv_voice_clear_config(PLCI *plci);
-
-static word get_b1_facilities(PLCI *plci, byte b1_resource);
-static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities);
-static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities);
-static word adjust_b_process(dword Id, PLCI *plci, byte Rc);
-static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command);
-static void adjust_b_restore(dword Id, PLCI *plci, byte Rc);
-static void reset_b3_command(dword Id, PLCI *plci, byte Rc);
-static void select_b_command(dword Id, PLCI *plci, byte Rc);
-static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc);
-static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc);
-static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc);
-static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc);
-static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc);
-static void hold_save_command(dword Id, PLCI *plci, byte Rc);
-static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc);
-static void init_b1_config(PLCI *plci);
-static void clear_b1_config(PLCI *plci);
-
-static void dtmf_command(dword Id, PLCI *plci, byte Rc);
-static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void dtmf_confirmation(dword Id, PLCI *plci);
-static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length);
-static void dtmf_parameter_write(PLCI *plci);
-
-
-static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id);
-static void mixer_set_bchannel_id(PLCI *plci, byte *chi);
-static void mixer_clear_config(PLCI *plci);
-static void mixer_notify_update(PLCI *plci, byte others);
-static void mixer_command(dword Id, PLCI *plci, byte Rc);
-static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void mixer_indication_coefs_set(dword Id, PLCI *plci);
-static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length);
-static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length);
-static void mixer_remove(PLCI *plci);
-
-
-static void ec_command(dword Id, PLCI *plci, byte Rc);
-static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void ec_indication(dword Id, PLCI *plci, byte *msg, word length);
-
-
-static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc);
-static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc);
-
-
-static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic);
-static void diva_free_dma_descriptor(PLCI *plci, int nr);
-
-/*------------------------------------------------------------------*/
-/* external function prototypes                                     */
-/*------------------------------------------------------------------*/
-
-extern byte MapController(byte);
-extern byte UnMapController(byte);
-#define MapId(Id)(((Id) & 0xffffff00L) | MapController((byte)(Id)))
-#define UnMapId(Id)(((Id) & 0xffffff00L) | UnMapController((byte)(Id)))
-
-void sendf(APPL *, word, dword, word, byte *, ...);
-void *TransmitBufferSet(APPL *appl, dword ref);
-void *TransmitBufferGet(APPL *appl, void *p);
-void TransmitBufferFree(APPL *appl, void *p);
-void *ReceiveBufferGet(APPL *appl, int Num);
-
-int fax_head_line_time(char *buffer);
-
-
-/*------------------------------------------------------------------*/
-/* Global data definitions                                          */
-/*------------------------------------------------------------------*/
-extern byte max_adapter;
-extern byte max_appl;
-extern DIVA_CAPI_ADAPTER *adapter;
-extern APPL *application;
-
-
-
-
-
-
-
-static byte remove_started = false;
-static PLCI dummy_plci;
-
-
-static struct _ftable {
-	word command;
-	byte *format;
-	byte (*function)(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-} ftable[] = {
-	{_DATA_B3_R,                          "dwww",         data_b3_req},
-	{_DATA_B3_I | RESPONSE,               "w",            data_b3_res},
-	{_INFO_R,                             "ss",           info_req},
-	{_INFO_I | RESPONSE,                  "",             info_res},
-	{_CONNECT_R,                          "wsssssssss",   connect_req},
-	{_CONNECT_I | RESPONSE,               "wsssss",       connect_res},
-	{_CONNECT_ACTIVE_I | RESPONSE,        "",             connect_a_res},
-	{_DISCONNECT_R,                       "s",            disconnect_req},
-	{_DISCONNECT_I | RESPONSE,            "",             disconnect_res},
-	{_LISTEN_R,                           "dddss",        listen_req},
-	{_ALERT_R,                            "s",            alert_req},
-	{_FACILITY_R,                         "ws",           facility_req},
-	{_FACILITY_I | RESPONSE,              "ws",           facility_res},
-	{_CONNECT_B3_R,                       "s",            connect_b3_req},
-	{_CONNECT_B3_I | RESPONSE,            "ws",           connect_b3_res},
-	{_CONNECT_B3_ACTIVE_I | RESPONSE,     "",             connect_b3_a_res},
-	{_DISCONNECT_B3_R,                    "s",            disconnect_b3_req},
-	{_DISCONNECT_B3_I | RESPONSE,         "",             disconnect_b3_res},
-	{_RESET_B3_R,                         "s",            reset_b3_req},
-	{_RESET_B3_I | RESPONSE,              "",             reset_b3_res},
-	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "ws",           connect_b3_t90_a_res},
-	{_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "",             connect_b3_t90_a_res},
-	{_SELECT_B_REQ,                       "s",            select_b_req},
-	{_MANUFACTURER_R,                     "dws",          manufacturer_req},
-	{_MANUFACTURER_I | RESPONSE,          "dws",          manufacturer_res},
-	{_MANUFACTURER_I | RESPONSE,          "",             manufacturer_res}
-};
-
-static byte *cip_bc[29][2] = {
-	{ "",                     ""                     }, /* 0 */
-	{ "\x03\x80\x90\xa3",     "\x03\x80\x90\xa2"     }, /* 1 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 2 */
-	{ "\x02\x89\x90",         "\x02\x89\x90"         }, /* 3 */
-	{ "\x03\x90\x90\xa3",     "\x03\x90\x90\xa2"     }, /* 4 */
-	{ "\x03\x91\x90\xa5",     "\x03\x91\x90\xa5"     }, /* 5 */
-	{ "\x02\x98\x90",         "\x02\x98\x90"         }, /* 6 */
-	{ "\x04\x88\xc0\xc6\xe6", "\x04\x88\xc0\xc6\xe6" }, /* 7 */
-	{ "\x04\x88\x90\x21\x8f", "\x04\x88\x90\x21\x8f" }, /* 8 */
-	{ "\x03\x91\x90\xa5",     "\x03\x91\x90\xa5"     }, /* 9 */
-	{ "",                     ""                     }, /* 10 */
-	{ "",                     ""                     }, /* 11 */
-	{ "",                     ""                     }, /* 12 */
-	{ "",                     ""                     }, /* 13 */
-	{ "",                     ""                     }, /* 14 */
-	{ "",                     ""                     }, /* 15 */
-
-	{ "\x03\x80\x90\xa3",     "\x03\x80\x90\xa2"     }, /* 16 */
-	{ "\x03\x90\x90\xa3",     "\x03\x90\x90\xa2"     }, /* 17 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 18 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 19 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 20 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 21 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 22 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 23 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 24 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }, /* 25 */
-	{ "\x03\x91\x90\xa5",     "\x03\x91\x90\xa5"     }, /* 26 */
-	{ "\x03\x91\x90\xa5",     "\x03\x91\x90\xa5"     }, /* 27 */
-	{ "\x02\x88\x90",         "\x02\x88\x90"         }  /* 28 */
-};
-
-static byte *cip_hlc[29] = {
-	"",                           /* 0 */
-	"",                           /* 1 */
-	"",                           /* 2 */
-	"",                           /* 3 */
-	"",                           /* 4 */
-	"",                           /* 5 */
-	"",                           /* 6 */
-	"",                           /* 7 */
-	"",                           /* 8 */
-	"",                           /* 9 */
-	"",                           /* 10 */
-	"",                           /* 11 */
-	"",                           /* 12 */
-	"",                           /* 13 */
-	"",                           /* 14 */
-	"",                           /* 15 */
-
-	"\x02\x91\x81",               /* 16 */
-	"\x02\x91\x84",               /* 17 */
-	"\x02\x91\xa1",               /* 18 */
-	"\x02\x91\xa4",               /* 19 */
-	"\x02\x91\xa8",               /* 20 */
-	"\x02\x91\xb1",               /* 21 */
-	"\x02\x91\xb2",               /* 22 */
-	"\x02\x91\xb5",               /* 23 */
-	"\x02\x91\xb8",               /* 24 */
-	"\x02\x91\xc1",               /* 25 */
-	"\x02\x91\x81",               /* 26 */
-	"\x03\x91\xe0\x01",           /* 27 */
-	"\x03\x91\xe0\x02"            /* 28 */
-};
-
-/*------------------------------------------------------------------*/
-
-#define V120_HEADER_LENGTH 1
-#define V120_HEADER_EXTEND_BIT  0x80
-#define V120_HEADER_BREAK_BIT   0x40
-#define V120_HEADER_C1_BIT      0x04
-#define V120_HEADER_C2_BIT      0x08
-#define V120_HEADER_FLUSH_COND  (V120_HEADER_BREAK_BIT | V120_HEADER_C1_BIT | V120_HEADER_C2_BIT)
-
-static byte v120_default_header[] =
-{
-
-	0x83                          /*  Ext, BR , res, res, C2 , C1 , B  , F   */
-
-};
-
-static byte v120_break_header[] =
-{
-
-	0xc3 | V120_HEADER_BREAK_BIT  /*  Ext, BR , res, res, C2 , C1 , B  , F   */
-
-};
-
-
-/*------------------------------------------------------------------*/
-/* API_PUT function                                                 */
-/*------------------------------------------------------------------*/
-
-word api_put(APPL *appl, CAPI_MSG *msg)
-{
-	word i, j, k, l, n;
-	word ret;
-	byte c;
-	byte controller;
-	DIVA_CAPI_ADAPTER *a;
-	PLCI *plci;
-	NCCI *ncci_ptr;
-	word ncci;
-	CAPI_MSG *m;
-	API_PARSE msg_parms[MAX_MSG_PARMS + 1];
-
-	if (msg->header.length < sizeof(msg->header) ||
-	    msg->header.length > MAX_MSG_SIZE) {
-		dbug(1, dprintf("bad len"));
-		return _BAD_MSG;
-	}
-
-	controller = (byte)((msg->header.controller & 0x7f) - 1);
-
-	/* controller starts with 0 up to (max_adapter - 1) */
-	if (controller >= max_adapter)
-	{
-		dbug(1, dprintf("invalid ctrl"));
-		return _BAD_MSG;
-	}
-
-	a = &adapter[controller];
-	plci = NULL;
-	if ((msg->header.plci != 0) && (msg->header.plci <= a->max_plci) && !a->adapter_disabled)
-	{
-		dbug(1, dprintf("plci=%x", msg->header.plci));
-		plci = &a->plci[msg->header.plci - 1];
-		ncci = GET_WORD(&msg->header.ncci);
-		if (plci->Id
-		    && (plci->appl
-			|| (plci->State == INC_CON_PENDING)
-			|| (plci->State == INC_CON_ALERT)
-			|| (msg->header.command == (_DISCONNECT_I | RESPONSE)))
-		    && ((ncci == 0)
-			|| (msg->header.command == (_DISCONNECT_B3_I | RESPONSE))
-			|| ((ncci < MAX_NCCI + 1) && (a->ncci_plci[ncci] == plci->Id))))
-		{
-			i = plci->msg_in_read_pos;
-			j = plci->msg_in_write_pos;
-			if (j >= i)
-			{
-				if (j + msg->header.length + MSG_IN_OVERHEAD <= MSG_IN_QUEUE_SIZE)
-					i += MSG_IN_QUEUE_SIZE - j;
-				else
-					j = 0;
-			}
-			else
-			{
-
-				n = (((CAPI_MSG *)(plci->msg_in_queue))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc;
-
-				if (i > MSG_IN_QUEUE_SIZE - n)
-					i = MSG_IN_QUEUE_SIZE - n + 1;
-				i -= j;
-			}
-
-			if (i <= ((msg->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc))
-
-			{
-				dbug(0, dprintf("Q-FULL1(msg) - len=%d write=%d read=%d wrap=%d free=%d",
-						msg->header.length, plci->msg_in_write_pos,
-						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
-
-				return _QUEUE_FULL;
-			}
-			c = false;
-			if ((((byte *) msg) < ((byte *)(plci->msg_in_queue)))
-			    || (((byte *) msg) >= ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
-			{
-				if (plci->msg_in_write_pos != plci->msg_in_read_pos)
-					c = true;
-			}
-			if (msg->header.command == _DATA_B3_R)
-			{
-				if (msg->header.length < 20)
-				{
-					dbug(1, dprintf("DATA_B3 REQ wrong length %d", msg->header.length));
-					return _BAD_MSG;
-				}
-				ncci_ptr = &(a->ncci[ncci]);
-				n = ncci_ptr->data_pending;
-				l = ncci_ptr->data_ack_pending;
-				k = plci->msg_in_read_pos;
-				while (k != plci->msg_in_write_pos)
-				{
-					if (k == plci->msg_in_wrap_pos)
-						k = 0;
-					if ((((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.command == _DATA_B3_R)
-					    && (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.ncci == ncci))
-					{
-						n++;
-						if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->info.data_b3_req.Flags & 0x0004)
-							l++;
-					}
-
-					k += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.length +
-					      MSG_IN_OVERHEAD + 3) & 0xfffc;
-
-				}
-				if ((n >= MAX_DATA_B3) || (l >= MAX_DATA_ACK))
-				{
-					dbug(0, dprintf("Q-FULL2(data) - pending=%d/%d ack_pending=%d/%d",
-							ncci_ptr->data_pending, n, ncci_ptr->data_ack_pending, l));
-
-					return _QUEUE_FULL;
-				}
-				if (plci->req_in || plci->internal_command)
-				{
-					if ((((byte *) msg) >= ((byte *)(plci->msg_in_queue)))
-					    && (((byte *) msg) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
-					{
-						dbug(0, dprintf("Q-FULL3(requeue)"));
-
-						return _QUEUE_FULL;
-					}
-					c = true;
-				}
-			}
-			else
-			{
-				if (plci->req_in || plci->internal_command)
-					c = true;
-				else
-				{
-					plci->command = msg->header.command;
-					plci->number = msg->header.number;
-				}
-			}
-			if (c)
-			{
-				dbug(1, dprintf("enqueue msg(0x%04x,0x%x,0x%x) - len=%d write=%d read=%d wrap=%d free=%d",
-						msg->header.command, plci->req_in, plci->internal_command,
-						msg->header.length, plci->msg_in_write_pos,
-						plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
-				if (j == 0)
-					plci->msg_in_wrap_pos = plci->msg_in_write_pos;
-				m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
-				for (i = 0; i < msg->header.length; i++)
-					((byte *)(plci->msg_in_queue))[j++] = ((byte *) msg)[i];
-				if (m->header.command == _DATA_B3_R)
-				{
-
-					m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet(appl, m->info.data_b3_req.Data));
-
-				}
-
-				j = (j + 3) & 0xfffc;
-
-				*((APPL **)(&((byte *)(plci->msg_in_queue))[j])) = appl;
-				plci->msg_in_write_pos = j + MSG_IN_OVERHEAD;
-				return 0;
-			}
-		}
-		else
-		{
-			plci = NULL;
-		}
-	}
-	dbug(1, dprintf("com=%x", msg->header.command));
-
-	for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
-	for (i = 0, ret = _BAD_MSG; i < ARRAY_SIZE(ftable); i++) {
-
-		if (ftable[i].command == msg->header.command) {
-			/* break loop if the message is correct, otherwise continue scan  */
-			/* (for example: CONNECT_B3_T90_ACT_RES has two specifications)   */
-			if (!api_parse(msg->info.b, (word)(msg->header.length - 12), ftable[i].format, msg_parms)) {
-				ret = 0;
-				break;
-			}
-			for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
-		}
-	}
-	if (ret) {
-		dbug(1, dprintf("BAD_MSG"));
-		if (plci) plci->command = 0;
-		return ret;
-	}
-
-
-	c = ftable[i].function(GET_DWORD(&msg->header.controller),
-			       msg->header.number,
-			       a,
-			       plci,
-			       appl,
-			       msg_parms);
-
-	channel_xmit_extended_xon(plci);
-
-	if (c == 1) send_req(plci);
-	if (c == 2 && plci) plci->req_in = plci->req_in_start = plci->req_out = 0;
-	if (plci && !plci->req_in) plci->command = 0;
-	return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* api_parse function, check the format of api messages             */
-/*------------------------------------------------------------------*/
-
-static word api_parse(byte *msg, word length, byte *format, API_PARSE *parms)
-{
-	word i;
-	word p;
-
-	for (i = 0, p = 0; format[i]; i++) {
-		if (parms)
-		{
-			parms[i].info = &msg[p];
-		}
-		switch (format[i]) {
-		case 'b':
-			p += 1;
-			break;
-		case 'w':
-			p += 2;
-			break;
-		case 'd':
-			p += 4;
-			break;
-		case 's':
-			if (msg[p] == 0xff) {
-				parms[i].info += 2;
-				parms[i].length = msg[p + 1] + (msg[p + 2] << 8);
-				p += (parms[i].length + 3);
-			}
-			else {
-				parms[i].length = msg[p];
-				p += (parms[i].length + 1);
-			}
-			break;
-		}
-
-		if (p > length) return true;
-	}
-	if (parms) parms[i].info = NULL;
-	return false;
-}
-
-static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out)
-{
-	word i, j, n = 0;
-	byte *p;
-
-	p = out->info;
-	for (i = 0; format[i] != '\0'; i++)
-	{
-		out->parms[i].info = p;
-		out->parms[i].length = in[i].length;
-		switch (format[i])
-		{
-		case 'b':
-			n = 1;
-			break;
-		case 'w':
-			n = 2;
-			break;
-		case 'd':
-			n = 4;
-			break;
-		case 's':
-			n = in[i].length + 1;
-			break;
-		}
-		for (j = 0; j < n; j++)
-			*(p++) = in[i].info[j];
-	}
-	out->parms[i].info = NULL;
-	out->parms[i].length = 0;
-}
-
-static void api_load_msg(API_SAVE *in, API_PARSE *out)
-{
-	word i;
-
-	i = 0;
-	do
-	{
-		out[i].info = in->parms[i].info;
-		out[i].length = in->parms[i].length;
-	} while (in->parms[i++].info);
-}
-
-
-/*------------------------------------------------------------------*/
-/* CAPI remove function                                             */
-/*------------------------------------------------------------------*/
-
-word api_remove_start(void)
-{
-	word i;
-	word j;
-
-	if (!remove_started) {
-		remove_started = true;
-		for (i = 0; i < max_adapter; i++) {
-			if (adapter[i].request) {
-				for (j = 0; j < adapter[i].max_plci; j++) {
-					if (adapter[i].plci[j].Sig.Id) plci_remove(&adapter[i].plci[j]);
-				}
-			}
-		}
-		return 1;
-	}
-	else {
-		for (i = 0; i < max_adapter; i++) {
-			if (adapter[i].request) {
-				for (j = 0; j < adapter[i].max_plci; j++) {
-					if (adapter[i].plci[j].Sig.Id) return 1;
-				}
-			}
-		}
-	}
-	api_remove_complete();
-	return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* internal command queue                                           */
-/*------------------------------------------------------------------*/
-
-static void init_internal_command_queue(PLCI *plci)
-{
-	word i;
-
-	dbug(1, dprintf("%s,%d: init_internal_command_queue",
-			(char *)(FILE_), __LINE__));
-
-	plci->internal_command = 0;
-	for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS; i++)
-		plci->internal_command_queue[i] = NULL;
-}
-
-
-static void start_internal_command(dword Id, PLCI *plci, t_std_internal_command command_function)
-{
-	word i;
-
-	dbug(1, dprintf("[%06lx] %s,%d: start_internal_command",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	if (plci->internal_command == 0)
-	{
-		plci->internal_command_queue[0] = command_function;
-		(*command_function)(Id, plci, OK);
-	}
-	else
-	{
-		i = 1;
-		while (plci->internal_command_queue[i] != NULL)
-			i++;
-		plci->internal_command_queue[i] = command_function;
-	}
-}
-
-
-static void next_internal_command(dword Id, PLCI *plci)
-{
-	word i;
-
-	dbug(1, dprintf("[%06lx] %s,%d: next_internal_command",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	plci->internal_command = 0;
-	plci->internal_command_queue[0] = NULL;
-	while (plci->internal_command_queue[1] != NULL)
-	{
-		for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
-			plci->internal_command_queue[i] = plci->internal_command_queue[i + 1];
-		plci->internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS - 1] = NULL;
-		(*(plci->internal_command_queue[0]))(Id, plci, OK);
-		if (plci->internal_command != 0)
-			return;
-		plci->internal_command_queue[0] = NULL;
-	}
-}
-
-
-/*------------------------------------------------------------------*/
-/* NCCI allocate/remove function                                    */
-/*------------------------------------------------------------------*/
-
-static dword ncci_mapping_bug = 0;
-
-static word get_ncci(PLCI *plci, byte ch, word force_ncci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word ncci, i, j, k;
-
-	a = plci->adapter;
-	if (!ch || a->ch_ncci[ch])
-	{
-		ncci_mapping_bug++;
-		dbug(1, dprintf("NCCI mapping exists %ld %02x %02x %02x-%02x",
-				ncci_mapping_bug, ch, force_ncci, a->ncci_ch[a->ch_ncci[ch]], a->ch_ncci[ch]));
-		ncci = ch;
-	}
-	else
-	{
-		if (force_ncci)
-			ncci = force_ncci;
-		else
-		{
-			if ((ch < MAX_NCCI + 1) && !a->ncci_ch[ch])
-				ncci = ch;
-			else
-			{
-				ncci = 1;
-				while ((ncci < MAX_NCCI + 1) && a->ncci_ch[ncci])
-					ncci++;
-				if (ncci == MAX_NCCI + 1)
-				{
-					ncci_mapping_bug++;
-					i = 1;
-					do
-					{
-						j = 1;
-						while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i))
-							j++;
-						k = j;
-						if (j < MAX_NCCI + 1)
-						{
-							do
-							{
-								j++;
-							} while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i));
-						}
-					} while ((i < MAX_NL_CHANNEL + 1) && (j < MAX_NCCI + 1));
-					if (i < MAX_NL_CHANNEL + 1)
-					{
-						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x %02x-%02x-%02x",
-								ncci_mapping_bug, ch, force_ncci, i, k, j));
-					}
-					else
-					{
-						dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x",
-								ncci_mapping_bug, ch, force_ncci));
-					}
-					ncci = ch;
-				}
-			}
-			a->ncci_plci[ncci] = plci->Id;
-			a->ncci_state[ncci] = IDLE;
-			if (!plci->ncci_ring_list)
-				plci->ncci_ring_list = ncci;
-			else
-				a->ncci_next[ncci] = a->ncci_next[plci->ncci_ring_list];
-			a->ncci_next[plci->ncci_ring_list] = (byte) ncci;
-		}
-		a->ncci_ch[ncci] = ch;
-		a->ch_ncci[ch] = (byte) ncci;
-		dbug(1, dprintf("NCCI mapping established %ld %02x %02x %02x-%02x",
-				ncci_mapping_bug, ch, force_ncci, ch, ncci));
-	}
-	return (ncci);
-}
-
-
-static void ncci_free_receive_buffers(PLCI *plci, word ncci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	APPL *appl;
-	word i, ncci_code;
-	dword Id;
-
-	a = plci->adapter;
-	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
-	if (ncci)
-	{
-		if (a->ncci_plci[ncci] == plci->Id)
-		{
-			if (!plci->appl)
-			{
-				ncci_mapping_bug++;
-				dbug(1, dprintf("NCCI mapping appl expected %ld %08lx",
-						ncci_mapping_bug, Id));
-			}
-			else
-			{
-				appl = plci->appl;
-				ncci_code = ncci | (((word) a->Id) << 8);
-				for (i = 0; i < appl->MaxBuffer; i++)
-				{
-					if ((appl->DataNCCI[i] == ncci_code)
-					    && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
-					{
-						appl->DataNCCI[i] = 0;
-					}
-				}
-			}
-		}
-	}
-	else
-	{
-		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
-		{
-			if (a->ncci_plci[ncci] == plci->Id)
-			{
-				if (!plci->appl)
-				{
-					ncci_mapping_bug++;
-					dbug(1, dprintf("NCCI mapping no appl %ld %08lx",
-							ncci_mapping_bug, Id));
-				}
-				else
-				{
-					appl = plci->appl;
-					ncci_code = ncci | (((word) a->Id) << 8);
-					for (i = 0; i < appl->MaxBuffer; i++)
-					{
-						if ((appl->DataNCCI[i] == ncci_code)
-						    && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
-						{
-							appl->DataNCCI[i] = 0;
-						}
-					}
-				}
-			}
-		}
-	}
-}
-
-
-static void cleanup_ncci_data(PLCI *plci, word ncci)
-{
-	NCCI *ncci_ptr;
-
-	if (ncci && (plci->adapter->ncci_plci[ncci] == plci->Id))
-	{
-		ncci_ptr = &(plci->adapter->ncci[ncci]);
-		if (plci->appl)
-		{
-			while (ncci_ptr->data_pending != 0)
-			{
-				if (!plci->data_sent || (ncci_ptr->DBuffer[ncci_ptr->data_out].P != plci->data_sent_ptr))
-					TransmitBufferFree(plci->appl, ncci_ptr->DBuffer[ncci_ptr->data_out].P);
-				(ncci_ptr->data_out)++;
-				if (ncci_ptr->data_out == MAX_DATA_B3)
-					ncci_ptr->data_out = 0;
-				(ncci_ptr->data_pending)--;
-			}
-		}
-		ncci_ptr->data_out = 0;
-		ncci_ptr->data_pending = 0;
-		ncci_ptr->data_ack_out = 0;
-		ncci_ptr->data_ack_pending = 0;
-	}
-}
-
-
-static void ncci_remove(PLCI *plci, word ncci, byte preserve_ncci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	dword Id;
-	word i;
-
-	a = plci->adapter;
-	Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
-	if (!preserve_ncci)
-		ncci_free_receive_buffers(plci, ncci);
-	if (ncci)
-	{
-		if (a->ncci_plci[ncci] != plci->Id)
-		{
-			ncci_mapping_bug++;
-			dbug(1, dprintf("NCCI mapping doesn't exist %ld %08lx %02x",
-					ncci_mapping_bug, Id, preserve_ncci));
-		}
-		else
-		{
-			cleanup_ncci_data(plci, ncci);
-			dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
-					ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
-			a->ch_ncci[a->ncci_ch[ncci]] = 0;
-			if (!preserve_ncci)
-			{
-				a->ncci_ch[ncci] = 0;
-				a->ncci_plci[ncci] = 0;
-				a->ncci_state[ncci] = IDLE;
-				i = plci->ncci_ring_list;
-				while ((i != 0) && (a->ncci_next[i] != plci->ncci_ring_list) && (a->ncci_next[i] != ncci))
-					i = a->ncci_next[i];
-				if ((i != 0) && (a->ncci_next[i] == ncci))
-				{
-					if (i == ncci)
-						plci->ncci_ring_list = 0;
-					else if (plci->ncci_ring_list == ncci)
-						plci->ncci_ring_list = i;
-					a->ncci_next[i] = a->ncci_next[ncci];
-				}
-				a->ncci_next[ncci] = 0;
-			}
-		}
-	}
-	else
-	{
-		for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
-		{
-			if (a->ncci_plci[ncci] == plci->Id)
-			{
-				cleanup_ncci_data(plci, ncci);
-				dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
-						ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
-				a->ch_ncci[a->ncci_ch[ncci]] = 0;
-				if (!preserve_ncci)
-				{
-					a->ncci_ch[ncci] = 0;
-					a->ncci_plci[ncci] = 0;
-					a->ncci_state[ncci] = IDLE;
-					a->ncci_next[ncci] = 0;
-				}
-			}
-		}
-		if (!preserve_ncci)
-			plci->ncci_ring_list = 0;
-	}
-}
-
-
-/*------------------------------------------------------------------*/
-/* PLCI remove function                                             */
-/*------------------------------------------------------------------*/
-
-static void plci_free_msg_in_queue(PLCI *plci)
-{
-	word i;
-
-	if (plci->appl)
-	{
-		i = plci->msg_in_read_pos;
-		while (i != plci->msg_in_write_pos)
-		{
-			if (i == plci->msg_in_wrap_pos)
-				i = 0;
-			if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.command == _DATA_B3_R)
-			{
-
-				TransmitBufferFree(plci->appl,
-						   (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
-
-			}
-
-			i += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.length +
-			      MSG_IN_OVERHEAD + 3) & 0xfffc;
-
-		}
-	}
-	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
-	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
-	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-}
-
-
-static void plci_remove(PLCI *plci)
-{
-
-	if (!plci) {
-		dbug(1, dprintf("plci_remove(no plci)"));
-		return;
-	}
-	init_internal_command_queue(plci);
-	dbug(1, dprintf("plci_remove(%x,tel=%x)", plci->Id, plci->tel));
-	if (plci_remove_check(plci))
-	{
-		return;
-	}
-	if (plci->Sig.Id == 0xff)
-	{
-		dbug(1, dprintf("D-channel X.25 plci->NL.Id:%0x", plci->NL.Id));
-		if (plci->NL.Id && !plci->nl_remove_id)
-		{
-			nl_req_ncci(plci, REMOVE, 0);
-			send_req(plci);
-		}
-	}
-	else
-	{
-		if (!plci->sig_remove_id
-		    && (plci->Sig.Id
-			|| (plci->req_in != plci->req_out)
-			|| (plci->nl_req || plci->sig_req)))
-		{
-			sig_req(plci, HANGUP, 0);
-			send_req(plci);
-		}
-	}
-	ncci_remove(plci, 0, false);
-	plci_free_msg_in_queue(plci);
-
-	plci->channels = 0;
-	plci->appl = NULL;
-	if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT))
-		plci->State = OUTG_DIS_PENDING;
-}
-
-/*------------------------------------------------------------------*/
-/* translation function for each message                            */
-/*------------------------------------------------------------------*/
-
-static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ch;
-	word i;
-	word Info;
-	byte LinkLayer;
-	API_PARSE *ai;
-	API_PARSE *bp;
-	API_PARSE ai_parms[5];
-	word channel = 0;
-	dword ch_mask;
-	byte m;
-	static byte esc_chi[35] = {0x02, 0x18, 0x01};
-	static byte lli[2] = {0x01, 0x00};
-	byte noCh = 0;
-	word dir = 0;
-	byte *p_chi = "";
-
-	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
-	dbug(1, dprintf("connect_req(%d)", parms->length));
-	Info = _WRONG_IDENTIFIER;
-	if (a)
-	{
-		if (a->adapter_disabled)
-		{
-			dbug(1, dprintf("adapter disabled"));
-			Id = ((word)1 << 8) | a->Id;
-			sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
-			sendf(appl, _DISCONNECT_I, Id, 0, "w", _L1_ERROR);
-			return false;
-		}
-		Info = _OUT_OF_PLCI;
-		if ((i = get_plci(a)))
-		{
-			Info = 0;
-			plci = &a->plci[i - 1];
-			plci->appl = appl;
-			plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-			/* check 'external controller' bit for codec support */
-			if (Id & EXT_CONTROLLER)
-			{
-				if (AdvCodecSupport(a, plci, appl, 0))
-				{
-					plci->Id = 0;
-					sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
-					return 2;
-				}
-			}
-			ai = &parms[9];
-			bp = &parms[5];
-			ch = 0;
-			if (bp->length)LinkLayer = bp->info[3];
-			else LinkLayer = 0;
-			if (ai->length)
-			{
-				ch = 0xffff;
-				if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
-				{
-					ch = 0;
-					if (ai_parms[0].length)
-					{
-						ch = GET_WORD(ai_parms[0].info + 1);
-						if (ch > 4) ch = 0; /* safety -> ignore ChannelID */
-						if (ch == 4) /* explicit CHI in message */
-						{
-							/* check length of B-CH struct */
-							if ((ai_parms[0].info)[3] >= 1)
-							{
-								if ((ai_parms[0].info)[4] == CHI)
-								{
-									p_chi = &((ai_parms[0].info)[5]);
-								}
-								else
-								{
-									p_chi = &((ai_parms[0].info)[3]);
-								}
-								if (p_chi[0] > 35) /* check length of channel ID */
-								{
-									Info = _WRONG_MESSAGE_FORMAT;
-								}
-							}
-							else Info = _WRONG_MESSAGE_FORMAT;
-						}
-
-						if (ch == 3 && ai_parms[0].length >= 7 && ai_parms[0].length <= 36)
-						{
-							dir = GET_WORD(ai_parms[0].info + 3);
-							ch_mask = 0;
-							m = 0x3f;
-							for (i = 0; i + 5 <= ai_parms[0].length; i++)
-							{
-								if (ai_parms[0].info[i + 5] != 0)
-								{
-									if ((ai_parms[0].info[i + 5] | m) != 0xff)
-										Info = _WRONG_MESSAGE_FORMAT;
-									else
-									{
-										if (ch_mask == 0)
-											channel = i;
-										ch_mask |= 1L << i;
-									}
-								}
-								m = 0;
-							}
-							if (ch_mask == 0)
-								Info = _WRONG_MESSAGE_FORMAT;
-							if (!Info)
-							{
-								if ((ai_parms[0].length == 36) || (ch_mask != ((dword)(1L << channel))))
-								{
-									esc_chi[0] = (byte)(ai_parms[0].length - 2);
-									for (i = 0; i + 5 <= ai_parms[0].length; i++)
-										esc_chi[i + 3] = ai_parms[0].info[i + 5];
-								}
-								else
-									esc_chi[0] = 2;
-								esc_chi[2] = (byte)channel;
-								plci->b_channel = (byte)channel; /* not correct for ETSI ch 17..31 */
-								add_p(plci, LLI, lli);
-								add_p(plci, ESC, esc_chi);
-								plci->State = LOCAL_CONNECT;
-								if (!dir) plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;     /* dir 0=DTE, 1=DCE */
-							}
-						}
-					}
-				}
-				else  Info = _WRONG_MESSAGE_FORMAT;
-			}
-
-			dbug(1, dprintf("ch=%x,dir=%x,p_ch=%d", ch, dir, channel));
-			plci->command = _CONNECT_R;
-			plci->number = Number;
-			/* x.31 or D-ch free SAPI in LinkLayer? */
-			if (ch == 1 && LinkLayer != 3 && LinkLayer != 12) noCh = true;
-			if ((ch == 0 || ch == 2 || noCh || ch == 3 || ch == 4) && !Info)
-			{
-				/* A B-channel is used for B3 connections (ch==0), no B channel  */
-				/* is used (ch==2), or a permanent connection (ch==3): do a CALL */
-				if (noCh) Info = add_b1(plci, &parms[5], 2, 0);    /* no resource    */
-				else     Info = add_b1(plci, &parms[5], ch, 0);
-				add_s(plci, OAD, &parms[2]);
-				add_s(plci, OSA, &parms[4]);
-				add_s(plci, BC, &parms[6]);
-				add_s(plci, LLC, &parms[7]);
-				add_s(plci, HLC, &parms[8]);
-				if (a->Info_Mask[appl->Id - 1] & 0x200)
-				{
-					/* early B3 connect (CIP mask bit 9) no release after a disc */
-					add_p(plci, LLI, "\x01\x01");
-				}
-				if (GET_WORD(parms[0].info) < 29) {
-					add_p(plci, BC, cip_bc[GET_WORD(parms[0].info)][a->u_law]);
-					add_p(plci, HLC, cip_hlc[GET_WORD(parms[0].info)]);
-				}
-				add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-				sig_req(plci, ASSIGN, DSIG_ID);
-			}
-			else if (ch == 1) {
-
-				/* D-Channel used for B3 connections */
-				plci->Sig.Id = 0xff;
-				Info = 0;
-			}
-
-			if (!Info && ch != 2 && !noCh) {
-				Info = add_b23(plci, &parms[5]);
-				if (!Info) {
-					if (!(plci->tel && !plci->adv_nl))nl_req_ncci(plci, ASSIGN, 0);
-				}
-			}
-
-			if (!Info)
-			{
-				if (ch == 0 || ch == 2 || ch == 3 || noCh || ch == 4)
-				{
-					if (plci->spoofed_msg == SPOOFING_REQUIRED)
-					{
-						api_save_msg(parms, "wsssssssss", &plci->saved_msg);
-						plci->spoofed_msg = CALL_REQ;
-						plci->internal_command = BLOCK_PLCI;
-						plci->command = 0;
-						dbug(1, dprintf("Spoof"));
-						send_req(plci);
-						return false;
-					}
-					if (ch == 4)add_p(plci, CHI, p_chi);
-					add_s(plci, CPN, &parms[1]);
-					add_s(plci, DSA, &parms[3]);
-					if (noCh) add_p(plci, ESC, "\x02\x18\xfd");  /* D-channel, no B-L3 */
-					add_ai(plci, &parms[9]);
-					if (!dir)sig_req(plci, CALL_REQ, 0);
-					else
-					{
-						plci->command = PERM_LIST_REQ;
-						plci->appl = appl;
-						sig_req(plci, LISTEN_REQ, 0);
-						send_req(plci);
-						return false;
-					}
-				}
-				send_req(plci);
-				return false;
-			}
-			plci->Id = 0;
-		}
-	}
-	sendf(appl,
-	      _CONNECT_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-	return 2;
-}
-
-static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word i, Info;
-	word Reject;
-	static byte cau_t[] = {0, 0, 0x90, 0x91, 0xac, 0x9d, 0x86, 0xd8, 0x9b};
-	static byte esc_t[] = {0x03, 0x08, 0x00, 0x00};
-	API_PARSE *ai;
-	API_PARSE ai_parms[5];
-	word ch = 0;
-
-	if (!plci) {
-		dbug(1, dprintf("connect_res(no plci)"));
-		return 0;  /* no plci, no send */
-	}
-
-	dbug(1, dprintf("connect_res(State=0x%x)", plci->State));
-	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-	ai = &parms[5];
-	dbug(1, dprintf("ai->length=%d", ai->length));
-
-	if (ai->length)
-	{
-		if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
-		{
-			dbug(1, dprintf("ai_parms[0].length=%d/0x%x", ai_parms[0].length, GET_WORD(ai_parms[0].info + 1)));
-			ch = 0;
-			if (ai_parms[0].length)
-			{
-				ch = GET_WORD(ai_parms[0].info + 1);
-				dbug(1, dprintf("BCH-I=0x%x", ch));
-			}
-		}
-	}
-
-	if (plci->State == INC_CON_CONNECTED_ALERT)
-	{
-		dbug(1, dprintf("Connected Alert Call_Res"));
-		if (a->Info_Mask[appl->Id - 1] & 0x200)
-		{
-			/* early B3 connect (CIP mask bit 9) no release after a disc */
-			add_p(plci, LLI, "\x01\x01");
-		}
-		add_s(plci, CONN_NR, &parms[2]);
-		add_s(plci, LLC, &parms[4]);
-		add_ai(plci, &parms[5]);
-		plci->State = INC_CON_ACCEPT;
-		sig_req(plci, CALL_RES, 0);
-		return 1;
-	}
-	else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) {
-		__clear_bit(appl->Id - 1, plci->c_ind_mask_table);
-		dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
-		Reject = GET_WORD(parms[0].info);
-		dbug(1, dprintf("Reject=0x%x", Reject));
-		if (Reject)
-		{
-			if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
-			{
-				if ((Reject & 0xff00) == 0x3400)
-				{
-					esc_t[2] = ((byte)(Reject & 0x00ff)) | 0x80;
-					add_p(plci, ESC, esc_t);
-					add_ai(plci, &parms[5]);
-					sig_req(plci, REJECT, 0);
-				}
-				else if (Reject == 1 || Reject >= 9)
-				{
-					add_ai(plci, &parms[5]);
-					sig_req(plci, HANGUP, 0);
-				}
-				else
-				{
-					esc_t[2] = cau_t[(Reject&0x000f)];
-					add_p(plci, ESC, esc_t);
-					add_ai(plci, &parms[5]);
-					sig_req(plci, REJECT, 0);
-				}
-				plci->appl = appl;
-			}
-			else
-			{
-				sendf(appl, _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
-			}
-		}
-		else {
-			plci->appl = appl;
-			if (Id & EXT_CONTROLLER) {
-				if (AdvCodecSupport(a, plci, appl, 0)) {
-					dbug(1, dprintf("connect_res(error from AdvCodecSupport)"));
-					sig_req(plci, HANGUP, 0);
-					return 1;
-				}
-				if (plci->tel == ADV_VOICE && a->AdvCodecPLCI)
-				{
-					Info = add_b23(plci, &parms[1]);
-					if (Info)
-					{
-						dbug(1, dprintf("connect_res(error from add_b23)"));
-						sig_req(plci, HANGUP, 0);
-						return 1;
-					}
-					if (plci->adv_nl)
-					{
-						nl_req_ncci(plci, ASSIGN, 0);
-					}
-				}
-			}
-			else
-			{
-				plci->tel = 0;
-				if (ch != 2)
-				{
-					Info = add_b23(plci, &parms[1]);
-					if (Info)
-					{
-						dbug(1, dprintf("connect_res(error from add_b23 2)"));
-						sig_req(plci, HANGUP, 0);
-						return 1;
-					}
-				}
-				nl_req_ncci(plci, ASSIGN, 0);
-			}
-
-			if (plci->spoofed_msg == SPOOFING_REQUIRED)
-			{
-				api_save_msg(parms, "wsssss", &plci->saved_msg);
-				plci->spoofed_msg = CALL_RES;
-				plci->internal_command = BLOCK_PLCI;
-				plci->command = 0;
-				dbug(1, dprintf("Spoof"));
-			}
-			else
-			{
-				add_b1(plci, &parms[1], ch, plci->B1_facilities);
-				if (a->Info_Mask[appl->Id - 1] & 0x200)
-				{
-					/* early B3 connect (CIP mask bit 9) no release after a disc */
-					add_p(plci, LLI, "\x01\x01");
-				}
-				add_s(plci, CONN_NR, &parms[2]);
-				add_s(plci, LLC, &parms[4]);
-				add_ai(plci, &parms[5]);
-				plci->State = INC_CON_ACCEPT;
-				sig_req(plci, CALL_RES, 0);
-			}
-
-			for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
-				sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
-		}
-	}
-	return 1;
-}
-
-static byte connect_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			  PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	dbug(1, dprintf("connect_a_res"));
-	return false;
-}
-
-static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			   PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word Info;
-	word i;
-
-	dbug(1, dprintf("disconnect_req"));
-
-	Info = _WRONG_IDENTIFIER;
-
-	if (plci)
-	{
-		if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
-		{
-			__clear_bit(appl->Id - 1, plci->c_ind_mask_table);
-			plci->appl = appl;
-			for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
-				sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
-			plci->State = OUTG_DIS_PENDING;
-		}
-		if (plci->Sig.Id && plci->appl)
-		{
-			Info = 0;
-			if (plci->Sig.Id != 0xff)
-			{
-				if (plci->State != INC_DIS_PENDING)
-				{
-					add_ai(plci, &msg[0]);
-					sig_req(plci, HANGUP, 0);
-					plci->State = OUTG_DIS_PENDING;
-					return 1;
-				}
-			}
-			else
-			{
-				if (plci->NL.Id && !plci->nl_remove_id)
-				{
-					mixer_remove(plci);
-					nl_req_ncci(plci, REMOVE, 0);
-					sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
-					sendf(appl, _DISCONNECT_I, Id, 0, "w", 0);
-					plci->State = INC_DIS_PENDING;
-				}
-				return 1;
-			}
-		}
-	}
-
-	if (!appl)  return false;
-	sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", Info);
-	return false;
-}
-
-static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			   PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	dbug(1, dprintf("disconnect_res"));
-	if (plci)
-	{
-		/* clear ind mask bit, just in case of collision of         */
-		/* DISCONNECT_IND and CONNECT_RES                           */
-		__clear_bit(appl->Id - 1, plci->c_ind_mask_table);
-		ncci_free_receive_buffers(plci, 0);
-		if (plci_remove_check(plci))
-		{
-			return 0;
-		}
-		if (plci->State == INC_DIS_PENDING
-		    || plci->State == SUSPENDING) {
-			if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
-				if (plci->State != SUSPENDING) plci->State = IDLE;
-				dbug(1, dprintf("chs=%d", plci->channels));
-				if (!plci->channels) {
-					plci_remove(plci);
-				}
-			}
-		}
-	}
-	return 0;
-}
-
-static byte listen_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-		       PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word Info;
-	byte i;
-
-	dbug(1, dprintf("listen_req(Appl=0x%x)", appl->Id));
-
-	Info = _WRONG_IDENTIFIER;
-	if (a) {
-		Info = 0;
-		a->Info_Mask[appl->Id - 1] = GET_DWORD(parms[0].info);
-		a->CIP_Mask[appl->Id - 1] = GET_DWORD(parms[1].info);
-		dbug(1, dprintf("CIP_MASK=0x%lx", GET_DWORD(parms[1].info)));
-		if (a->Info_Mask[appl->Id - 1] & 0x200) { /* early B3 connect provides */
-			a->Info_Mask[appl->Id - 1] |=  0x10;   /* call progression infos    */
-		}
-
-		/* check if external controller listen and switch listen on or off*/
-		if (Id&EXT_CONTROLLER && GET_DWORD(parms[1].info)) {
-			if (a->profile.Global_Options & ON_BOARD_CODEC) {
-				dummy_plci.State = IDLE;
-				a->codec_listen[appl->Id - 1] = &dummy_plci;
-				a->TelOAD[0] = (byte)(parms[3].length);
-				for (i = 1; parms[3].length >= i && i < 22; i++) {
-					a->TelOAD[i] = parms[3].info[i];
-				}
-				a->TelOAD[i] = 0;
-				a->TelOSA[0] = (byte)(parms[4].length);
-				for (i = 1; parms[4].length >= i && i < 22; i++) {
-					a->TelOSA[i] = parms[4].info[i];
-				}
-				a->TelOSA[i] = 0;
-			}
-			else Info = 0x2002; /* wrong controller, codec not supported */
-		}
-		else {              /* clear listen */
-			a->codec_listen[appl->Id - 1] = (PLCI *)0;
-		}
-	}
-	sendf(appl,
-	      _LISTEN_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-
-	if (a) listen_check(a);
-	return false;
-}
-
-static byte info_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-		     PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word i;
-	API_PARSE *ai;
-	PLCI *rc_plci = NULL;
-	API_PARSE ai_parms[5];
-	word Info = 0;
-
-	dbug(1, dprintf("info_req"));
-	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
-	ai = &msg[1];
-
-	if (ai->length)
-	{
-		if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
-		{
-			dbug(1, dprintf("AddInfo wrong"));
-			Info = _WRONG_MESSAGE_FORMAT;
-		}
-	}
-	if (!a) Info = _WRONG_STATE;
-
-	if (!Info && plci)
-	{                /* no fac, with CPN, or KEY */
-		rc_plci = plci;
-		if (!ai_parms[3].length && plci->State && (msg[0].length || ai_parms[1].length))
-		{
-			/* overlap sending option */
-			dbug(1, dprintf("OvlSnd"));
-			add_s(plci, CPN, &msg[0]);
-			add_s(plci, KEY, &ai_parms[1]);
-			sig_req(plci, INFO_REQ, 0);
-			send_req(plci);
-			return false;
-		}
-
-		if (plci->State && ai_parms[2].length)
-		{
-			/* User_Info option */
-			dbug(1, dprintf("UUI"));
-			add_s(plci, UUI, &ai_parms[2]);
-			sig_req(plci, USER_DATA, 0);
-		}
-		else if (plci->State && ai_parms[3].length)
-		{
-			/* Facility option */
-			dbug(1, dprintf("FAC"));
-			add_s(plci, CPN, &msg[0]);
-			add_ai(plci, &msg[1]);
-			sig_req(plci, FACILITY_REQ, 0);
-		}
-		else
-		{
-			Info = _WRONG_STATE;
-		}
-	}
-	else if ((ai_parms[1].length || ai_parms[2].length || ai_parms[3].length) && !Info)
-	{
-		/* NCR_Facility option -> send UUI and Keypad too */
-		dbug(1, dprintf("NCR_FAC"));
-		if ((i = get_plci(a)))
-		{
-			rc_plci = &a->plci[i - 1];
-			appl->NullCREnable = true;
-			rc_plci->internal_command = C_NCR_FAC_REQ;
-			rc_plci->appl = appl;
-			add_p(rc_plci, CAI, "\x01\x80");
-			add_p(rc_plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-			sig_req(rc_plci, ASSIGN, DSIG_ID);
-			send_req(rc_plci);
-		}
-		else
-		{
-			Info = _OUT_OF_PLCI;
-		}
-
-		if (!Info)
-		{
-			add_s(rc_plci, CPN, &msg[0]);
-			add_ai(rc_plci, &msg[1]);
-			sig_req(rc_plci, NCR_FACILITY, 0);
-			send_req(rc_plci);
-			return false;
-			/* for application controlled supplementary services    */
-		}
-	}
-
-	if (!rc_plci)
-	{
-		Info = _WRONG_MESSAGE_FORMAT;
-	}
-
-	if (!Info)
-	{
-		send_req(rc_plci);
-	}
-	else
-	{  /* appl is not assigned to a PLCI or error condition */
-		dbug(1, dprintf("localInfoCon"));
-		sendf(appl,
-		      _INFO_R | CONFIRM,
-		      Id,
-		      Number,
-		      "w", Info);
-	}
-	return false;
-}
-
-static byte info_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-		     PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	dbug(1, dprintf("info_res"));
-	return false;
-}
-
-static byte alert_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-		      PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word Info;
-	byte ret;
-
-	dbug(1, dprintf("alert_req"));
-
-	Info = _WRONG_IDENTIFIER;
-	ret = false;
-	if (plci) {
-		Info = _ALERT_IGNORED;
-		if (plci->State != INC_CON_ALERT) {
-			Info = _WRONG_STATE;
-			if (plci->State == INC_CON_PENDING) {
-				Info = 0;
-				plci->State = INC_CON_ALERT;
-				add_ai(plci, &msg[0]);
-				sig_req(plci, CALL_ALERT, 0);
-				ret = 1;
-			}
-		}
-	}
-	sendf(appl,
-	      _ALERT_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-	return ret;
-}
-
-static byte facility_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			 PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word Info = 0;
-	word i    = 0;
-
-	word selector;
-	word SSreq;
-	long relatedPLCIvalue;
-	DIVA_CAPI_ADAPTER *relatedadapter;
-	byte *SSparms  = "";
-	byte RCparms[]  = "\x05\x00\x00\x02\x00\x00";
-	byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
-	API_PARSE *parms;
-	API_PARSE ss_parms[11];
-	PLCI *rplci;
-	byte cai[15];
-	dword d;
-	API_PARSE dummy;
-
-	dbug(1, dprintf("facility_req"));
-	for (i = 0; i < 9; i++) ss_parms[i].length = 0;
-
-	parms = &msg[1];
-
-	if (!a)
-	{
-		dbug(1, dprintf("wrong Ctrl"));
-		Info = _WRONG_IDENTIFIER;
-	}
-
-	selector = GET_WORD(msg[0].info);
-
-	if (!Info)
-	{
-		switch (selector)
-		{
-		case SELECTOR_HANDSET:
-			Info = AdvCodecSupport(a, plci, appl, HOOK_SUPPORT);
-			break;
-
-		case SELECTOR_SU_SERV:
-			if (!msg[1].length)
-			{
-				Info = _WRONG_MESSAGE_FORMAT;
-				break;
-			}
-			SSreq = GET_WORD(&(msg[1].info[1]));
-			PUT_WORD(&RCparms[1], SSreq);
-			SSparms = RCparms;
-			switch (SSreq)
-			{
-			case S_GET_SUPPORTED_SERVICES:
-				if ((i = get_plci(a)))
-				{
-					rplci = &a->plci[i - 1];
-					rplci->appl = appl;
-					add_p(rplci, CAI, "\x01\x80");
-					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-					sig_req(rplci, ASSIGN, DSIG_ID);
-					send_req(rplci);
-				}
-				else
-				{
-					PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
-					SSparms = (byte *)SSstruct;
-					break;
-				}
-				rplci->internal_command = GETSERV_REQ_PEND;
-				rplci->number = Number;
-				rplci->appl = appl;
-				sig_req(rplci, S_SUPPORTED, 0);
-				send_req(rplci);
-				return false;
-				break;
-
-			case S_LISTEN:
-				if (parms->length == 7)
-				{
-					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
-					{
-						dbug(1, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-				}
-				else
-				{
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				a->Notification_Mask[appl->Id - 1] = GET_DWORD(ss_parms[2].info);
-				if (a->Notification_Mask[appl->Id - 1] & SMASK_MWI) /* MWI active? */
-				{
-					if ((i = get_plci(a)))
-					{
-						rplci = &a->plci[i - 1];
-						rplci->appl = appl;
-						add_p(rplci, CAI, "\x01\x80");
-						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						sig_req(rplci, ASSIGN, DSIG_ID);
-						send_req(rplci);
-					}
-					else
-					{
-						break;
-					}
-					rplci->internal_command = GET_MWI_STATE;
-					rplci->number = Number;
-					sig_req(rplci, MWI_POLL, 0);
-					send_req(rplci);
-				}
-				break;
-
-			case S_HOLD:
-				api_parse(&parms->info[1], (word)parms->length, "ws", ss_parms);
-				if (plci && plci->State && plci->SuppState == IDLE)
-				{
-					plci->SuppState = HOLD_REQUEST;
-					plci->command = C_HOLD_REQ;
-					add_s(plci, CAI, &ss_parms[1]);
-					sig_req(plci, CALL_HOLD, 0);
-					send_req(plci);
-					return false;
-				}
-				else Info = 0x3010;                    /* wrong state           */
-				break;
-			case S_RETRIEVE:
-				if (plci && plci->State && plci->SuppState == CALL_HELD)
-				{
-					if (Id & EXT_CONTROLLER)
-					{
-						if (AdvCodecSupport(a, plci, appl, 0))
-						{
-							Info = 0x3010;                    /* wrong state           */
-							break;
-						}
-					}
-					else plci->tel = 0;
-
-					plci->SuppState = RETRIEVE_REQUEST;
-					plci->command = C_RETRIEVE_REQ;
-					if (plci->spoofed_msg == SPOOFING_REQUIRED)
-					{
-						plci->spoofed_msg = CALL_RETRIEVE;
-						plci->internal_command = BLOCK_PLCI;
-						plci->command = 0;
-						dbug(1, dprintf("Spoof"));
-						return false;
-					}
-					else
-					{
-						sig_req(plci, CALL_RETRIEVE, 0);
-						send_req(plci);
-						return false;
-					}
-				}
-				else Info = 0x3010;                    /* wrong state           */
-				break;
-			case S_SUSPEND:
-				if (parms->length)
-				{
-					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
-					{
-						dbug(1, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-				}
-				if (plci && plci->State)
-				{
-					add_s(plci, CAI, &ss_parms[2]);
-					plci->command = SUSPEND_REQ;
-					sig_req(plci, SUSPEND, 0);
-					plci->State = SUSPENDING;
-					send_req(plci);
-				}
-				else Info = 0x3010;                    /* wrong state           */
-				break;
-
-			case S_RESUME:
-				if (!(i = get_plci(a)))
-				{
-					Info = _OUT_OF_PLCI;
-					break;
-				}
-				rplci = &a->plci[i - 1];
-				rplci->appl = appl;
-				rplci->number = Number;
-				rplci->tel = 0;
-				rplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-				/* check 'external controller' bit for codec support */
-				if (Id & EXT_CONTROLLER)
-				{
-					if (AdvCodecSupport(a, rplci, appl, 0))
-					{
-						rplci->Id = 0;
-						Info = 0x300A;
-						break;
-					}
-				}
-				if (parms->length)
-				{
-					if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
-					{
-						dbug(1, dprintf("format wrong"));
-						rplci->Id = 0;
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-				}
-				dummy.length = 0;
-				dummy.info = "\x00";
-				add_b1(rplci, &dummy, 0, 0);
-				if (a->Info_Mask[appl->Id - 1] & 0x200)
-				{
-					/* early B3 connect (CIP mask bit 9) no release after a disc */
-					add_p(rplci, LLI, "\x01\x01");
-				}
-				add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-				sig_req(rplci, ASSIGN, DSIG_ID);
-				send_req(rplci);
-				add_s(rplci, CAI, &ss_parms[2]);
-				rplci->command = RESUME_REQ;
-				sig_req(rplci, RESUME, 0);
-				rplci->State = RESUMING;
-				send_req(rplci);
-				break;
-
-			case S_CONF_BEGIN: /* Request */
-			case S_CONF_DROP:
-			case S_CONF_ISOLATE:
-			case S_CONF_REATTACH:
-				if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
-				{
-					dbug(1, dprintf("format wrong"));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (plci && plci->State && ((plci->SuppState == IDLE) || (plci->SuppState == CALL_HELD)))
-				{
-					d = GET_DWORD(ss_parms[2].info);
-					if (d >= 0x80)
-					{
-						dbug(1, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-					plci->ptyState = (byte)SSreq;
-					plci->command = 0;
-					cai[0] = 2;
-					switch (SSreq)
-					{
-					case S_CONF_BEGIN:
-						cai[1] = CONF_BEGIN;
-						plci->internal_command = CONF_BEGIN_REQ_PEND;
-						break;
-					case S_CONF_DROP:
-						cai[1] = CONF_DROP;
-						plci->internal_command = CONF_DROP_REQ_PEND;
-						break;
-					case S_CONF_ISOLATE:
-						cai[1] = CONF_ISOLATE;
-						plci->internal_command = CONF_ISOLATE_REQ_PEND;
-						break;
-					case S_CONF_REATTACH:
-						cai[1] = CONF_REATTACH;
-						plci->internal_command = CONF_REATTACH_REQ_PEND;
-						break;
-					}
-					cai[2] = (byte)d; /* Conference Size resp. PartyId */
-					add_p(plci, CAI, cai);
-					sig_req(plci, S_SERVICE, 0);
-					send_req(plci);
-					return false;
-				}
-				else Info = 0x3010;                    /* wrong state           */
-				break;
-
-			case S_ECT:
-			case S_3PTY_BEGIN:
-			case S_3PTY_END:
-			case S_CONF_ADD:
-				if (parms->length == 7)
-				{
-					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
-					{
-						dbug(1, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-				}
-				else if (parms->length == 8) /* workaround for the T-View-S */
-				{
-					if (api_parse(&parms->info[1], (word)parms->length, "wbdb", ss_parms))
-					{
-						dbug(1, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-				}
-				else
-				{
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (!msg[1].length)
-				{
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (!plci)
-				{
-					Info = _WRONG_IDENTIFIER;
-					break;
-				}
-				relatedPLCIvalue = GET_DWORD(ss_parms[2].info);
-				relatedPLCIvalue &= 0x0000FFFF;
-				dbug(1, dprintf("PTY/ECT/addCONF,relPLCI=%lx", relatedPLCIvalue));
-				/* controller starts with 0 up to (max_adapter - 1) */
-				if (((relatedPLCIvalue & 0x7f) == 0)
-				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) == 0)
-				    || (MapController((byte)(relatedPLCIvalue & 0x7f)) > max_adapter))
-				{
-					if (SSreq == S_3PTY_END)
-					{
-						dbug(1, dprintf("wrong Controller use 2nd PLCI=PLCI"));
-						rplci = plci;
-					}
-					else
-					{
-						Info = 0x3010;                    /* wrong state           */
-						break;
-					}
-				}
-				else
-				{
-					relatedadapter = &adapter[MapController((byte)(relatedPLCIvalue & 0x7f)) - 1];
-					relatedPLCIvalue >>= 8;
-					/* find PLCI PTR*/
-					for (i = 0, rplci = NULL; i < relatedadapter->max_plci; i++)
-					{
-						if (relatedadapter->plci[i].Id == (byte)relatedPLCIvalue)
-						{
-							rplci = &relatedadapter->plci[i];
-						}
-					}
-					if (!rplci || !relatedPLCIvalue)
-					{
-						if (SSreq == S_3PTY_END)
-						{
-							dbug(1, dprintf("use 2nd PLCI=PLCI"));
-							rplci = plci;
-						}
-						else
-						{
-							Info = 0x3010;                    /* wrong state           */
-							break;
-						}
-					}
-				}
-/*
-  dbug(1, dprintf("rplci:%x", rplci));
-  dbug(1, dprintf("plci:%x", plci));
-  dbug(1, dprintf("rplci->ptyState:%x", rplci->ptyState));
-  dbug(1, dprintf("plci->ptyState:%x", plci->ptyState));
-  dbug(1, dprintf("SSreq:%x", SSreq));
-  dbug(1, dprintf("rplci->internal_command:%x", rplci->internal_command));
-  dbug(1, dprintf("rplci->appl:%x", rplci->appl));
-  dbug(1, dprintf("rplci->Id:%x", rplci->Id));
-*/
-				/* send PTY/ECT req, cannot check all states because of US stuff */
-				if (!rplci->internal_command && rplci->appl)
-				{
-					plci->command = 0;
-					rplci->relatedPTYPLCI = plci;
-					plci->relatedPTYPLCI = rplci;
-					rplci->ptyState = (byte)SSreq;
-					if (SSreq == S_ECT)
-					{
-						rplci->internal_command = ECT_REQ_PEND;
-						cai[1] = ECT_EXECUTE;
-
-						rplci->vswitchstate = 0;
-						rplci->vsprot = 0;
-						rplci->vsprotdialect = 0;
-						plci->vswitchstate = 0;
-						plci->vsprot = 0;
-						plci->vsprotdialect = 0;
-
-					}
-					else if (SSreq == S_CONF_ADD)
-					{
-						rplci->internal_command = CONF_ADD_REQ_PEND;
-						cai[1] = CONF_ADD;
-					}
-					else
-					{
-						rplci->internal_command = PTY_REQ_PEND;
-						cai[1] = (byte)(SSreq - 3);
-					}
-					rplci->number = Number;
-					if (plci != rplci) /* explicit invocation */
-					{
-						cai[0] = 2;
-						cai[2] = plci->Sig.Id;
-						dbug(1, dprintf("explicit invocation"));
-					}
-					else
-					{
-						dbug(1, dprintf("implicit invocation"));
-						cai[0] = 1;
-					}
-					add_p(rplci, CAI, cai);
-					sig_req(rplci, S_SERVICE, 0);
-					send_req(rplci);
-					return false;
-				}
-				else
-				{
-					dbug(0, dprintf("Wrong line"));
-					Info = 0x3010;                    /* wrong state           */
-					break;
-				}
-				break;
-
-			case S_CALL_DEFLECTION:
-				if (api_parse(&parms->info[1], (word)parms->length, "wbwss", ss_parms))
-				{
-					dbug(1, dprintf("format wrong"));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (!plci)
-				{
-					Info = _WRONG_IDENTIFIER;
-					break;
-				}
-				/* reuse unused screening indicator */
-				ss_parms[3].info[3] = (byte)GET_WORD(&(ss_parms[2].info[0]));
-				plci->command = 0;
-				plci->internal_command = CD_REQ_PEND;
-				appl->CDEnable = true;
-				cai[0] = 1;
-				cai[1] = CALL_DEFLECTION;
-				add_p(plci, CAI, cai);
-				add_p(plci, CPN, ss_parms[3].info);
-				sig_req(plci, S_SERVICE, 0);
-				send_req(plci);
-				return false;
-				break;
-
-			case S_CALL_FORWARDING_START:
-				if (api_parse(&parms->info[1], (word)parms->length, "wbdwwsss", ss_parms))
-				{
-					dbug(1, dprintf("format wrong"));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-
-				if ((i = get_plci(a)))
-				{
-					rplci = &a->plci[i - 1];
-					rplci->appl = appl;
-					add_p(rplci, CAI, "\x01\x80");
-					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-					sig_req(rplci, ASSIGN, DSIG_ID);
-					send_req(rplci);
-				}
-				else
-				{
-					Info = _OUT_OF_PLCI;
-					break;
-				}
-
-				/* reuse unused screening indicator */
-				rplci->internal_command = CF_START_PEND;
-				rplci->appl = appl;
-				rplci->number = Number;
-				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
-				cai[0] = 2;
-				cai[1] = 0x70 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
-				cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
-				add_p(rplci, CAI, cai);
-				add_p(rplci, OAD, ss_parms[5].info);
-				add_p(rplci, CPN, ss_parms[6].info);
-				sig_req(rplci, S_SERVICE, 0);
-				send_req(rplci);
-				return false;
-				break;
-
-			case S_INTERROGATE_DIVERSION:
-			case S_INTERROGATE_NUMBERS:
-			case S_CALL_FORWARDING_STOP:
-			case S_CCBS_REQUEST:
-			case S_CCBS_DEACTIVATE:
-			case S_CCBS_INTERROGATE:
-				switch (SSreq)
-				{
-				case S_INTERROGATE_NUMBERS:
-					if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
-					{
-						dbug(0, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-					}
-					break;
-				case S_CCBS_REQUEST:
-				case S_CCBS_DEACTIVATE:
-					if (api_parse(&parms->info[1], (word)parms->length, "wbdw", ss_parms))
-					{
-						dbug(0, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-					}
-					break;
-				case S_CCBS_INTERROGATE:
-					if (api_parse(&parms->info[1], (word)parms->length, "wbdws", ss_parms))
-					{
-						dbug(0, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-					}
-					break;
-				default:
-					if (api_parse(&parms->info[1], (word)parms->length, "wbdwws", ss_parms))
-					{
-						dbug(0, dprintf("format wrong"));
-						Info = _WRONG_MESSAGE_FORMAT;
-						break;
-					}
-					break;
-				}
-
-				if (Info) break;
-				if ((i = get_plci(a)))
-				{
-					rplci = &a->plci[i - 1];
-					switch (SSreq)
-					{
-					case S_INTERROGATE_DIVERSION: /* use cai with S_SERVICE below */
-						cai[1] = 0x60 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
-						rplci->internal_command = INTERR_DIVERSION_REQ_PEND; /* move to rplci if assigned */
-						break;
-					case S_INTERROGATE_NUMBERS: /* use cai with S_SERVICE below */
-						cai[1] = DIVERSION_INTERROGATE_NUM; /* Function */
-						rplci->internal_command = INTERR_NUMBERS_REQ_PEND; /* move to rplci if assigned */
-						break;
-					case S_CALL_FORWARDING_STOP:
-						rplci->internal_command = CF_STOP_PEND;
-						cai[1] = 0x80 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
-						break;
-					case S_CCBS_REQUEST:
-						cai[1] = CCBS_REQUEST;
-						rplci->internal_command = CCBS_REQUEST_REQ_PEND;
-						break;
-					case S_CCBS_DEACTIVATE:
-						cai[1] = CCBS_DEACTIVATE;
-						rplci->internal_command = CCBS_DEACTIVATE_REQ_PEND;
-						break;
-					case S_CCBS_INTERROGATE:
-						cai[1] = CCBS_INTERROGATE;
-						rplci->internal_command = CCBS_INTERROGATE_REQ_PEND;
-						break;
-					default:
-						cai[1] = 0;
-						break;
-					}
-					rplci->appl = appl;
-					rplci->number = Number;
-					add_p(rplci, CAI, "\x01\x80");
-					add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-					sig_req(rplci, ASSIGN, DSIG_ID);
-					send_req(rplci);
-				}
-				else
-				{
-					Info = _OUT_OF_PLCI;
-					break;
-				}
-
-				appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
-				switch (SSreq)
-				{
-				case S_INTERROGATE_NUMBERS:
-					cai[0] = 1;
-					add_p(rplci, CAI, cai);
-					break;
-				case S_CCBS_REQUEST:
-				case S_CCBS_DEACTIVATE:
-					cai[0] = 3;
-					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
-					add_p(rplci, CAI, cai);
-					break;
-				case S_CCBS_INTERROGATE:
-					cai[0] = 3;
-					PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
-					add_p(rplci, CAI, cai);
-					add_p(rplci, OAD, ss_parms[4].info);
-					break;
-				default:
-					cai[0] = 2;
-					cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
-					add_p(rplci, CAI, cai);
-					add_p(rplci, OAD, ss_parms[5].info);
-					break;
-				}
-
-				sig_req(rplci, S_SERVICE, 0);
-				send_req(rplci);
-				return false;
-				break;
-
-			case S_MWI_ACTIVATE:
-				if (api_parse(&parms->info[1], (word)parms->length, "wbwdwwwssss", ss_parms))
-				{
-					dbug(1, dprintf("format wrong"));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (!plci)
-				{
-					if ((i = get_plci(a)))
-					{
-						rplci = &a->plci[i - 1];
-						rplci->appl = appl;
-						rplci->cr_enquiry = true;
-						add_p(rplci, CAI, "\x01\x80");
-						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						sig_req(rplci, ASSIGN, DSIG_ID);
-						send_req(rplci);
-					}
-					else
-					{
-						Info = _OUT_OF_PLCI;
-						break;
-					}
-				}
-				else
-				{
-					rplci = plci;
-					rplci->cr_enquiry = false;
-				}
-
-				rplci->command = 0;
-				rplci->internal_command = MWI_ACTIVATE_REQ_PEND;
-				rplci->appl = appl;
-				rplci->number = Number;
-
-				cai[0] = 13;
-				cai[1] = ACTIVATION_MWI; /* Function */
-				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
-				PUT_DWORD(&cai[4], GET_DWORD(&(ss_parms[3].info[0]))); /* Number of Messages */
-				PUT_WORD(&cai[8], GET_WORD(&(ss_parms[4].info[0]))); /* Message Status */
-				PUT_WORD(&cai[10], GET_WORD(&(ss_parms[5].info[0]))); /* Message Reference */
-				PUT_WORD(&cai[12], GET_WORD(&(ss_parms[6].info[0]))); /* Invocation Mode */
-				add_p(rplci, CAI, cai);
-				add_p(rplci, CPN, ss_parms[7].info); /* Receiving User Number */
-				add_p(rplci, OAD, ss_parms[8].info); /* Controlling User Number */
-				add_p(rplci, OSA, ss_parms[9].info); /* Controlling User Provided Number */
-				add_p(rplci, UID, ss_parms[10].info); /* Time */
-				sig_req(rplci, S_SERVICE, 0);
-				send_req(rplci);
-				return false;
-
-			case S_MWI_DEACTIVATE:
-				if (api_parse(&parms->info[1], (word)parms->length, "wbwwss", ss_parms))
-				{
-					dbug(1, dprintf("format wrong"));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (!plci)
-				{
-					if ((i = get_plci(a)))
-					{
-						rplci = &a->plci[i - 1];
-						rplci->appl = appl;
-						rplci->cr_enquiry = true;
-						add_p(rplci, CAI, "\x01\x80");
-						add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						sig_req(rplci, ASSIGN, DSIG_ID);
-						send_req(rplci);
-					}
-					else
-					{
-						Info = _OUT_OF_PLCI;
-						break;
-					}
-				}
-				else
-				{
-					rplci = plci;
-					rplci->cr_enquiry = false;
-				}
-
-				rplci->command = 0;
-				rplci->internal_command = MWI_DEACTIVATE_REQ_PEND;
-				rplci->appl = appl;
-				rplci->number = Number;
-
-				cai[0] = 5;
-				cai[1] = DEACTIVATION_MWI; /* Function */
-				PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
-				PUT_WORD(&cai[4], GET_WORD(&(ss_parms[3].info[0]))); /* Invocation Mode */
-				add_p(rplci, CAI, cai);
-				add_p(rplci, CPN, ss_parms[4].info); /* Receiving User Number */
-				add_p(rplci, OAD, ss_parms[5].info); /* Controlling User Number */
-				sig_req(rplci, S_SERVICE, 0);
-				send_req(rplci);
-				return false;
-
-			default:
-				Info = 0x300E;  /* not supported */
-				break;
-			}
-			break; /* case SELECTOR_SU_SERV: end */
-
-
-		case SELECTOR_DTMF:
-			return (dtmf_request(Id, Number, a, plci, appl, msg));
-
-
-
-		case SELECTOR_LINE_INTERCONNECT:
-			return (mixer_request(Id, Number, a, plci, appl, msg));
-
-
-
-		case PRIV_SELECTOR_ECHO_CANCELLER:
-			appl->appl_flags |= APPL_FLAG_PRIV_EC_SPEC;
-			return (ec_request(Id, Number, a, plci, appl, msg));
-
-		case SELECTOR_ECHO_CANCELLER:
-			appl->appl_flags &= ~APPL_FLAG_PRIV_EC_SPEC;
-			return (ec_request(Id, Number, a, plci, appl, msg));
-
-
-		case SELECTOR_V42BIS:
-		default:
-			Info = _FACILITY_NOT_SUPPORTED;
-			break;
-		} /* end of switch (selector) */
-	}
-
-	dbug(1, dprintf("SendFacRc"));
-	sendf(appl,
-	      _FACILITY_R | CONFIRM,
-	      Id,
-	      Number,
-	      "wws", Info, selector, SSparms);
-	return false;
-}
-
-static byte facility_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			 PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	dbug(1, dprintf("facility_res"));
-	return false;
-}
-
-static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			   PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word Info = 0;
-	byte req;
-	byte len;
-	word w;
-	word fax_control_bits, fax_feature_bits, fax_info_change;
-	API_PARSE *ncpi;
-	byte pvc[2];
-
-	API_PARSE fax_parms[9];
-	word i;
-
-
-	dbug(1, dprintf("connect_b3_req"));
-	if (plci)
-	{
-		if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING)
-		    || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE))
-		{
-			Info = _WRONG_STATE;
-		}
-		else
-		{
-			/* local reply if assign unsuccessful
-			   or B3 protocol allows only one layer 3 connection
-			   and already connected
-			   or B2 protocol not any LAPD
-			   and connect_b3_req contradicts originate/answer direction */
-			if (!plci->NL.Id
-			    || (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
-				&& ((plci->channels != 0)
-				    || (((plci->B2_prot != B2_SDLC) && (plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL))
-					&& ((plci->call_dir & CALL_DIR_ANSWER) && !(plci->call_dir & CALL_DIR_FORCE_OUTG_NL))))))
-			{
-				dbug(1, dprintf("B3 already connected=%d or no NL.Id=0x%x, dir=%d sstate=0x%x",
-						plci->channels, plci->NL.Id, plci->call_dir, plci->SuppState));
-				Info = _WRONG_STATE;
-				sendf(appl,
-				      _CONNECT_B3_R | CONFIRM,
-				      Id,
-				      Number,
-				      "w", Info);
-				return false;
-			}
-			plci->requested_options_conn = 0;
-
-			req = N_CONNECT;
-			ncpi = &parms[0];
-			if (plci->B3_prot == 2 || plci->B3_prot == 3)
-			{
-				if (ncpi->length > 2)
-				{
-					/* check for PVC */
-					if (ncpi->info[2] || ncpi->info[3])
-					{
-						pvc[0] = ncpi->info[3];
-						pvc[1] = ncpi->info[2];
-						add_d(plci, 2, pvc);
-						req = N_RESET;
-					}
-					else
-					{
-						if (ncpi->info[1] & 1) req = N_CONNECT | N_D_BIT;
-						add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
-					}
-				}
-			}
-			else if (plci->B3_prot == 5)
-			{
-				if (plci->NL.Id && !plci->nl_remove_id)
-				{
-					fax_control_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low);
-					fax_feature_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low);
-					if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
-					    || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
-					{
-						len = offsetof(T30_INFO, universal_6);
-						fax_info_change = false;
-						if (ncpi->length >= 4)
-						{
-							w = GET_WORD(&ncpi->info[3]);
-							if ((w & 0x0001) != ((word)(((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & 0x0001)))
-							{
-								((T30_INFO *)(plci->fax_connect_info_buffer))->resolution =
-									(byte)((((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & ~T30_RESOLUTION_R8_0770_OR_200) |
-									       ((w & 0x0001) ? T30_RESOLUTION_R8_0770_OR_200 : 0));
-								fax_info_change = true;
-							}
-							fax_control_bits &= ~(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
-							if (w & 0x0002)  /* Fax-polling request */
-								fax_control_bits |= T30_CONTROL_BIT_REQUEST_POLLING;
-							if ((w & 0x0004) /* Request to send / poll another document */
-							    && (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS))
-							{
-								fax_control_bits |= T30_CONTROL_BIT_MORE_DOCUMENTS;
-							}
-							if (ncpi->length >= 6)
-							{
-								w = GET_WORD(&ncpi->info[5]);
-								if (((byte) w) != ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format)
-								{
-									((T30_INFO *)(plci->fax_connect_info_buffer))->data_format = (byte) w;
-									fax_info_change = true;
-								}
-
-								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
-								    && (GET_WORD(&ncpi->info[5]) & 0x8000)) /* Private SEP/SUB/PWD enable */
-								{
-									plci->requested_options_conn |= (1L << PRIVATE_FAX_SUB_SEP_PWD);
-								}
-								if ((a->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
-								    && (GET_WORD(&ncpi->info[5]) & 0x4000)) /* Private non-standard facilities enable */
-								{
-									plci->requested_options_conn |= (1L << PRIVATE_FAX_NONSTANDARD);
-								}
-								fax_control_bits &= ~(T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_SEL_POLLING |
-										      T30_CONTROL_BIT_ACCEPT_PASSWORD);
-								if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
-								    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
-								{
-									if (api_parse(&ncpi->info[1], ncpi->length, "wwwwsss", fax_parms))
-										Info = _WRONG_MESSAGE_FORMAT;
-									else
-									{
-										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
-										    & (1L << PRIVATE_FAX_SUB_SEP_PWD))
-										{
-											fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
-											if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
-												fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
-										}
-										w = fax_parms[4].length;
-										if (w > 20)
-											w = 20;
-										((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = (byte) w;
-										for (i = 0; i < w; i++)
-											((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1 + i];
-										((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
-										len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
-										w = fax_parms[5].length;
-										if (w > 20)
-											w = 20;
-										plci->fax_connect_info_buffer[len++] = (byte) w;
-										for (i = 0; i < w; i++)
-											plci->fax_connect_info_buffer[len++] = fax_parms[5].info[1 + i];
-										w = fax_parms[6].length;
-										if (w > 20)
-											w = 20;
-										plci->fax_connect_info_buffer[len++] = (byte) w;
-										for (i = 0; i < w; i++)
-											plci->fax_connect_info_buffer[len++] = fax_parms[6].info[1 + i];
-										if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
-										    & (1L << PRIVATE_FAX_NONSTANDARD))
-										{
-											if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
-											{
-												dbug(1, dprintf("non-standard facilities info missing or wrong format"));
-												plci->fax_connect_info_buffer[len++] = 0;
-											}
-											else
-											{
-												if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
-													plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
-												plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
-												for (i = 0; i < fax_parms[7].length; i++)
-													plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
-											}
-										}
-									}
-								}
-								else
-								{
-									len = offsetof(T30_INFO, universal_6);
-								}
-								fax_info_change = true;
-
-							}
-							if (fax_control_bits != GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low))
-							{
-								PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low, fax_control_bits);
-								fax_info_change = true;
-							}
-						}
-						if (Info == GOOD)
-						{
-							plci->fax_connect_info_length = len;
-							if (fax_info_change)
-							{
-								if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
-								{
-									start_internal_command(Id, plci, fax_connect_info_command);
-									return false;
-								}
-								else
-								{
-									start_internal_command(Id, plci, fax_adjust_b23_command);
-									return false;
-								}
-							}
-						}
-					}
-					else  Info = _WRONG_STATE;
-				}
-				else  Info = _WRONG_STATE;
-			}
-
-			else if (plci->B3_prot == B3_RTP)
-			{
-				plci->internal_req_buffer[0] = ncpi->length + 1;
-				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
-				for (w = 0; w < ncpi->length; w++)
-					plci->internal_req_buffer[2 + w] = ncpi->info[1 + w];
-				start_internal_command(Id, plci, rtp_connect_b3_req_command);
-				return false;
-			}
-
-			if (!Info)
-			{
-				nl_req_ncci(plci, req, 0);
-				return 1;
-			}
-		}
-	}
-	else Info = _WRONG_IDENTIFIER;
-
-	sendf(appl,
-	      _CONNECT_B3_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-	return false;
-}
-
-static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			   PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ncci;
-	API_PARSE *ncpi;
-	byte req;
-
-	word w;
-
-
-	API_PARSE fax_parms[9];
-	word i;
-	byte len;
-
-
-	dbug(1, dprintf("connect_b3_res"));
-
-	ncci = (word)(Id >> 16);
-	if (plci && ncci) {
-		if (a->ncci_state[ncci] == INC_CON_PENDING) {
-			if (GET_WORD(&parms[0].info[0]) != 0)
-			{
-				a->ncci_state[ncci] = OUTG_REJ_PENDING;
-				channel_request_xon(plci, a->ncci_ch[ncci]);
-				channel_xmit_xon(plci);
-				cleanup_ncci_data(plci, ncci);
-				nl_req_ncci(plci, N_DISC, (byte)ncci);
-				return 1;
-			}
-			a->ncci_state[ncci] = INC_ACT_PENDING;
-
-			req = N_CONNECT_ACK;
-			ncpi = &parms[1];
-			if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
-			{
-
-				if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
-				    & (1L << PRIVATE_FAX_NONSTANDARD))
-				{
-					if (((plci->B3_prot == 4) || (plci->B3_prot == 5))
-					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
-					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
-					{
-						len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
-						if (plci->fax_connect_info_length < len)
-						{
-							((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
-							((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
-						}
-						if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
-						{
-							dbug(1, dprintf("non-standard facilities info missing or wrong format"));
-						}
-						else
-						{
-							if (plci->fax_connect_info_length <= len)
-								plci->fax_connect_info_buffer[len] = 0;
-							len += 1 + plci->fax_connect_info_buffer[len];
-							if (plci->fax_connect_info_length <= len)
-								plci->fax_connect_info_buffer[len] = 0;
-							len += 1 + plci->fax_connect_info_buffer[len];
-							if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
-								plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
-							plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
-							for (i = 0; i < fax_parms[7].length; i++)
-								plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
-						}
-						plci->fax_connect_info_length = len;
-						((T30_INFO *)(plci->fax_connect_info_buffer))->code = 0;
-						start_internal_command(Id, plci, fax_connect_ack_command);
-						return false;
-					}
-				}
-
-				nl_req_ncci(plci, req, (byte)ncci);
-				if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-				    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-				{
-					if (plci->B3_prot == 4)
-						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-					else
-						sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-					plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-				}
-			}
-
-			else if (plci->B3_prot == B3_RTP)
-			{
-				plci->internal_req_buffer[0] = ncpi->length + 1;
-				plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
-				for (w = 0; w < ncpi->length; w++)
-					plci->internal_req_buffer[2 + w] = ncpi->info[1+w];
-				start_internal_command(Id, plci, rtp_connect_b3_res_command);
-				return false;
-			}
-
-			else
-			{
-				if (ncpi->length > 2) {
-					if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
-					add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
-				}
-				nl_req_ncci(plci, req, (byte)ncci);
-				sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-				if (plci->adjust_b_restore)
-				{
-					plci->adjust_b_restore = false;
-					start_internal_command(Id, plci, adjust_b_restore);
-				}
-			}
-			return 1;
-		}
-	}
-	return false;
-}
-
-static byte connect_b3_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			     PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ncci;
-
-	ncci = (word)(Id >> 16);
-	dbug(1, dprintf("connect_b3_a_res(ncci=0x%x)", ncci));
-
-	if (plci && ncci && (plci->State != IDLE) && (plci->State != INC_DIS_PENDING)
-	    && (plci->State != OUTG_DIS_PENDING))
-	{
-		if (a->ncci_state[ncci] == INC_ACT_PENDING) {
-			a->ncci_state[ncci] = CONNECTED;
-			if (plci->State != INC_CON_CONNECTED_ALERT) plci->State = CONNECTED;
-			channel_request_xon(plci, a->ncci_ch[ncci]);
-			channel_xmit_xon(plci);
-		}
-	}
-	return false;
-}
-
-static byte disconnect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			      PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word Info;
-	word ncci;
-	API_PARSE *ncpi;
-
-	dbug(1, dprintf("disconnect_b3_req"));
-
-	Info = _WRONG_IDENTIFIER;
-	ncci = (word)(Id >> 16);
-	if (plci && ncci)
-	{
-		Info = _WRONG_STATE;
-		if ((a->ncci_state[ncci] == CONNECTED)
-		    || (a->ncci_state[ncci] == OUTG_CON_PENDING)
-		    || (a->ncci_state[ncci] == INC_CON_PENDING)
-		    || (a->ncci_state[ncci] == INC_ACT_PENDING))
-		{
-			a->ncci_state[ncci] = OUTG_DIS_PENDING;
-			channel_request_xon(plci, a->ncci_ch[ncci]);
-			channel_xmit_xon(plci);
-
-			if (a->ncci[ncci].data_pending
-			    && ((plci->B3_prot == B3_TRANSPARENT)
-				|| (plci->B3_prot == B3_T30)
-				|| (plci->B3_prot == B3_T30_WITH_EXTENSIONS)))
-			{
-				plci->send_disc = (byte)ncci;
-				plci->command = 0;
-				return false;
-			}
-			else
-			{
-				cleanup_ncci_data(plci, ncci);
-
-				if (plci->B3_prot == 2 || plci->B3_prot == 3)
-				{
-					ncpi = &parms[0];
-					if (ncpi->length > 3)
-					{
-						add_d(plci, (word)(ncpi->length - 3), (byte *)&(ncpi->info[4]));
-					}
-				}
-				nl_req_ncci(plci, N_DISC, (byte)ncci);
-			}
-			return 1;
-		}
-	}
-	sendf(appl,
-	      _DISCONNECT_B3_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-	return false;
-}
-
-static byte disconnect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			      PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ncci;
-	word i;
-
-	ncci = (word)(Id >> 16);
-	dbug(1, dprintf("disconnect_b3_res(ncci=0x%x", ncci));
-	if (plci && ncci) {
-		plci->requested_options_conn = 0;
-		plci->fax_connect_info_length = 0;
-		plci->ncpi_state = 0x00;
-		if (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
-		    && ((plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)))
-		{
-			plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
-		}
-		for (i = 0; i < MAX_CHANNELS_PER_PLCI && plci->inc_dis_ncci_table[i] != (byte)ncci; i++);
-		if (i < MAX_CHANNELS_PER_PLCI) {
-			if (plci->channels)plci->channels--;
-			for (; i < MAX_CHANNELS_PER_PLCI - 1; i++) plci->inc_dis_ncci_table[i] = plci->inc_dis_ncci_table[i + 1];
-			plci->inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI - 1] = 0;
-
-			ncci_free_receive_buffers(plci, ncci);
-
-			if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
-				if (plci->State == SUSPENDING) {
-					sendf(plci->appl,
-					      _FACILITY_I,
-					      Id & 0xffffL,
-					      0,
-					      "ws", (word)3, "\x03\x04\x00\x00");
-					sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
-				}
-				plci_remove(plci);
-				plci->State = IDLE;
-			}
-		}
-		else
-		{
-			if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-			    && ((plci->B3_prot == 4) || (plci->B3_prot == 5))
-			    && (a->ncci_state[ncci] == INC_DIS_PENDING))
-			{
-				ncci_free_receive_buffers(plci, ncci);
-
-				nl_req_ncci(plci, N_EDATA, (byte)ncci);
-
-				plci->adapter->ncci_state[ncci] = IDLE;
-				start_internal_command(Id, plci, fax_disconnect_command);
-				return 1;
-			}
-		}
-	}
-	return false;
-}
-
-static byte data_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	NCCI *ncci_ptr;
-	DATA_B3_DESC *data;
-	word Info;
-	word ncci;
-	word i;
-
-	dbug(1, dprintf("data_b3_req"));
-
-	Info = _WRONG_IDENTIFIER;
-	ncci = (word)(Id >> 16);
-	dbug(1, dprintf("ncci=0x%x, plci=0x%x", ncci, plci));
-
-	if (plci && ncci)
-	{
-		Info = _WRONG_STATE;
-		if ((a->ncci_state[ncci] == CONNECTED)
-		    || (a->ncci_state[ncci] == INC_ACT_PENDING))
-		{
-			/* queue data */
-			ncci_ptr = &(a->ncci[ncci]);
-			i = ncci_ptr->data_out + ncci_ptr->data_pending;
-			if (i >= MAX_DATA_B3)
-				i -= MAX_DATA_B3;
-			data = &(ncci_ptr->DBuffer[i]);
-			data->Number = Number;
-			if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
-			    && (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
-			{
-
-				data->P = (byte *)(long)(*((dword *)(parms[0].info)));
-
-			}
-			else
-				data->P = TransmitBufferSet(appl, *(dword *)parms[0].info);
-			data->Length = GET_WORD(parms[1].info);
-			data->Handle = GET_WORD(parms[2].info);
-			data->Flags = GET_WORD(parms[3].info);
-			(ncci_ptr->data_pending)++;
-
-			/* check for delivery confirmation */
-			if (data->Flags & 0x0004)
-			{
-				i = ncci_ptr->data_ack_out + ncci_ptr->data_ack_pending;
-				if (i >= MAX_DATA_ACK)
-					i -= MAX_DATA_ACK;
-				ncci_ptr->DataAck[i].Number = data->Number;
-				ncci_ptr->DataAck[i].Handle = data->Handle;
-				(ncci_ptr->data_ack_pending)++;
-			}
-
-			send_data(plci);
-			return false;
-		}
-	}
-	if (appl)
-	{
-		if (plci)
-		{
-			if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
-			    && (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
-			{
-
-				TransmitBufferFree(appl, (byte *)(long)(*((dword *)(parms[0].info))));
-
-			}
-		}
-		sendf(appl,
-		      _DATA_B3_R | CONFIRM,
-		      Id,
-		      Number,
-		      "ww", GET_WORD(parms[2].info), Info);
-	}
-	return false;
-}
-
-static byte data_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word n;
-	word ncci;
-	word NCCIcode;
-
-	dbug(1, dprintf("data_b3_res"));
-
-	ncci = (word)(Id >> 16);
-	if (plci && ncci) {
-		n = GET_WORD(parms[0].info);
-		dbug(1, dprintf("free(%d)", n));
-		NCCIcode = ncci | (((word) a->Id) << 8);
-		if (n < appl->MaxBuffer &&
-		    appl->DataNCCI[n] == NCCIcode &&
-		    (byte)(appl->DataFlags[n] >> 8) == plci->Id) {
-			dbug(1, dprintf("found"));
-			appl->DataNCCI[n] = 0;
-
-			if (channel_can_xon(plci, a->ncci_ch[ncci])) {
-				channel_request_xon(plci, a->ncci_ch[ncci]);
-			}
-			channel_xmit_xon(plci);
-
-			if (appl->DataFlags[n] & 4) {
-				nl_req_ncci(plci, N_DATA_ACK, (byte)ncci);
-				return 1;
-			}
-		}
-	}
-	return false;
-}
-
-static byte reset_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			 PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word Info;
-	word ncci;
-
-	dbug(1, dprintf("reset_b3_req"));
-
-	Info = _WRONG_IDENTIFIER;
-	ncci = (word)(Id >> 16);
-	if (plci && ncci)
-	{
-		Info = _WRONG_STATE;
-		switch (plci->B3_prot)
-		{
-		case B3_ISO8208:
-		case B3_X25_DCE:
-			if (a->ncci_state[ncci] == CONNECTED)
-			{
-				nl_req_ncci(plci, N_RESET, (byte)ncci);
-				send_req(plci);
-				Info = GOOD;
-			}
-			break;
-		case B3_TRANSPARENT:
-			if (a->ncci_state[ncci] == CONNECTED)
-			{
-				start_internal_command(Id, plci, reset_b3_command);
-				Info = GOOD;
-			}
-			break;
-		}
-	}
-	/* reset_b3 must result in a reset_b3_con & reset_b3_Ind */
-	sendf(appl,
-	      _RESET_B3_R | CONFIRM,
-	      Id,
-	      Number,
-	      "w", Info);
-	return false;
-}
-
-static byte reset_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			 PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ncci;
-
-	dbug(1, dprintf("reset_b3_res"));
-
-	ncci = (word)(Id >> 16);
-	if (plci && ncci) {
-		switch (plci->B3_prot)
-		{
-		case B3_ISO8208:
-		case B3_X25_DCE:
-			if (a->ncci_state[ncci] == INC_RES_PENDING)
-			{
-				a->ncci_state[ncci] = CONNECTED;
-				nl_req_ncci(plci, N_RESET_ACK, (byte)ncci);
-				return true;
-			}
-			break;
-		}
-	}
-	return false;
-}
-
-static byte connect_b3_t90_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-				 PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word ncci;
-	API_PARSE *ncpi;
-	byte req;
-
-	dbug(1, dprintf("connect_b3_t90_a_res"));
-
-	ncci = (word)(Id >> 16);
-	if (plci && ncci) {
-		if (a->ncci_state[ncci] == INC_ACT_PENDING) {
-			a->ncci_state[ncci] = CONNECTED;
-		}
-		else if (a->ncci_state[ncci] == INC_CON_PENDING) {
-			a->ncci_state[ncci] = CONNECTED;
-
-			req = N_CONNECT_ACK;
-
-			/* parms[0]==0 for CAPI original message definition! */
-			if (parms[0].info) {
-				ncpi = &parms[1];
-				if (ncpi->length > 2) {
-					if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
-					add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
-				}
-			}
-			nl_req_ncci(plci, req, (byte)ncci);
-			return 1;
-		}
-	}
-	return false;
-}
-
-
-static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			 PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word Info = 0;
-	word i;
-	byte tel;
-	API_PARSE bp_parms[7];
-
-	if (!plci || !msg)
-	{
-		Info = _WRONG_IDENTIFIER;
-	}
-	else
-	{
-		dbug(1, dprintf("select_b_req[%d],PLCI=0x%x,Tel=0x%x,NL=0x%x,appl=0x%x,sstate=0x%x",
-				msg->length, plci->Id, plci->tel, plci->NL.Id, plci->appl, plci->SuppState));
-		dbug(1, dprintf("PlciState=0x%x", plci->State));
-		for (i = 0; i < 7; i++) bp_parms[i].length = 0;
-
-		/* check if no channel is open, no B3 connected only */
-		if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING)
-		    || (plci->SuppState != IDLE) || plci->channels || plci->nl_remove_id)
-		{
-			Info = _WRONG_STATE;
-		}
-		/* check message format and fill bp_parms pointer */
-		else if (msg->length && api_parse(&msg->info[1], (word)msg->length, "wwwsss", bp_parms))
-		{
-			Info = _WRONG_MESSAGE_FORMAT;
-		}
-		else
-		{
-			if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT)) /* send alert tone inband to the network, */
-			{                                                                  /* e.g. Qsig or RBS or Cornet-N or xess PRI */
-				if (Id & EXT_CONTROLLER)
-				{
-					sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", 0x2002); /* wrong controller */
-					return 0;
-				}
-				plci->State = INC_CON_CONNECTED_ALERT;
-				plci->appl = appl;
-				__clear_bit(appl->Id - 1, plci->c_ind_mask_table);
-				dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
-				/* disconnect the other appls, it's effectively a connect */
-				for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
-					sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
-			}
-
-			api_save_msg(msg, "s", &plci->saved_msg);
-			tel = plci->tel;
-			if (Id & EXT_CONTROLLER)
-			{
-				if (tel) /* external controller in use by this PLCI */
-				{
-					if (a->AdvSignalAppl && a->AdvSignalAppl != appl)
-					{
-						dbug(1, dprintf("Ext_Ctrl in use 1"));
-						Info = _WRONG_STATE;
-					}
-				}
-				else  /* external controller NOT in use by this PLCI ? */
-				{
-					if (a->AdvSignalPLCI)
-					{
-						dbug(1, dprintf("Ext_Ctrl in use 2"));
-						Info = _WRONG_STATE;
-					}
-					else /* activate the codec */
-					{
-						dbug(1, dprintf("Ext_Ctrl start"));
-						if (AdvCodecSupport(a, plci, appl, 0))
-						{
-							dbug(1, dprintf("Error in codec procedures"));
-							Info = _WRONG_STATE;
-						}
-						else if (plci->spoofed_msg == SPOOFING_REQUIRED) /* wait until codec is active */
-						{
-							plci->spoofed_msg = AWAITING_SELECT_B;
-							plci->internal_command = BLOCK_PLCI; /* lock other commands */
-							plci->command = 0;
-							dbug(1, dprintf("continue if codec loaded"));
-							return false;
-						}
-					}
-				}
-			}
-			else /* external controller bit is OFF */
-			{
-				if (tel) /* external controller in use, need to switch off */
-				{
-					if (a->AdvSignalAppl == appl)
-					{
-						CodecIdCheck(a, plci);
-						plci->tel = 0;
-						plci->adv_nl = 0;
-						dbug(1, dprintf("Ext_Ctrl disable"));
-					}
-					else
-					{
-						dbug(1, dprintf("Ext_Ctrl not requested"));
-					}
-				}
-			}
-			if (!Info)
-			{
-				if (plci->call_dir & CALL_DIR_OUT)
-					plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-				else if (plci->call_dir & CALL_DIR_IN)
-					plci->call_dir = CALL_DIR_IN | CALL_DIR_ANSWER;
-				start_internal_command(Id, plci, select_b_command);
-				return false;
-			}
-		}
-	}
-	sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", Info);
-	return false;
-}
-
-static byte manufacturer_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			     PLCI *plci, APPL *appl, API_PARSE *parms)
-{
-	word command;
-	word i;
-	word ncci;
-	API_PARSE *m;
-	API_PARSE m_parms[5];
-	word codec;
-	byte req;
-	byte ch;
-	byte dir;
-	static byte chi[2] = {0x01, 0x00};
-	static byte lli[2] = {0x01, 0x00};
-	static byte codec_cai[2] = {0x01, 0x01};
-	static byte null_msg = {0};
-	static API_PARSE null_parms = { 0, &null_msg };
-	PLCI *v_plci;
-	word Info = 0;
-
-	dbug(1, dprintf("manufacturer_req"));
-	for (i = 0; i < 5; i++) m_parms[i].length = 0;
-
-	if (GET_DWORD(parms[0].info) != _DI_MANU_ID) {
-		Info = _WRONG_MESSAGE_FORMAT;
-	}
-	command = GET_WORD(parms[1].info);
-	m = &parms[2];
-	if (!Info)
-	{
-		switch (command) {
-		case _DI_ASSIGN_PLCI:
-			if (api_parse(&m->info[1], (word)m->length, "wbbs", m_parms)) {
-				Info = _WRONG_MESSAGE_FORMAT;
-				break;
-			}
-			codec = GET_WORD(m_parms[0].info);
-			ch = m_parms[1].info[0];
-			dir = m_parms[2].info[0];
-			if ((i = get_plci(a))) {
-				plci = &a->plci[i - 1];
-				plci->appl = appl;
-				plci->command = _MANUFACTURER_R;
-				plci->m_command = command;
-				plci->number = Number;
-				plci->State = LOCAL_CONNECT;
-				Id = (((word)plci->Id << 8) | plci->adapter->Id | 0x80);
-				dbug(1, dprintf("ManCMD,plci=0x%x", Id));
-
-				if ((ch == 1 || ch == 2) && (dir <= 2)) {
-					chi[1] = (byte)(0x80 | ch);
-					lli[1] = 0;
-					plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-					switch (codec)
-					{
-					case 0:
-						Info = add_b1(plci, &m_parms[3], 0, 0);
-						break;
-					case 1:
-						add_p(plci, CAI, codec_cai);
-						break;
-						/* manual 'switch on' to the codec support without signalling */
-						/* first 'assign plci' with this function, then use */
-					case 2:
-						if (AdvCodecSupport(a, plci, appl, 0)) {
-							Info = _RESOURCE_ERROR;
-						}
-						else {
-							Info = add_b1(plci, &null_parms, 0, B1_FACILITY_LOCAL);
-							lli[1] = 0x10; /* local call codec stream */
-						}
-						break;
-					}
-
-					plci->State = LOCAL_CONNECT;
-					plci->manufacturer = true;
-					plci->command = _MANUFACTURER_R;
-					plci->m_command = command;
-					plci->number = Number;
-
-					if (!Info)
-					{
-						add_p(plci, LLI, lli);
-						add_p(plci, CHI, chi);
-						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						sig_req(plci, ASSIGN, DSIG_ID);
-
-						if (!codec)
-						{
-							Info = add_b23(plci, &m_parms[3]);
-							if (!Info)
-							{
-								nl_req_ncci(plci, ASSIGN, 0);
-								send_req(plci);
-							}
-						}
-						if (!Info)
-						{
-							dbug(1, dprintf("dir=0x%x,spoof=0x%x", dir, plci->spoofed_msg));
-							if (plci->spoofed_msg == SPOOFING_REQUIRED)
-							{
-								api_save_msg(m_parms, "wbbs", &plci->saved_msg);
-								plci->spoofed_msg = AWAITING_MANUF_CON;
-								plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
-								plci->command = 0;
-								send_req(plci);
-								return false;
-							}
-							if (dir == 1) {
-								sig_req(plci, CALL_REQ, 0);
-							}
-							else if (!dir) {
-								sig_req(plci, LISTEN_REQ, 0);
-							}
-							send_req(plci);
-						}
-						else
-						{
-							sendf(appl,
-							      _MANUFACTURER_R | CONFIRM,
-							      Id,
-							      Number,
-							      "dww", _DI_MANU_ID, command, Info);
-							return 2;
-						}
-					}
-				}
-			}
-			else  Info = _OUT_OF_PLCI;
-			break;
-
-		case _DI_IDI_CTRL:
-			if (!plci)
-			{
-				Info = _WRONG_IDENTIFIER;
-				break;
-			}
-			if (api_parse(&m->info[1], (word)m->length, "bs", m_parms)) {
-				Info = _WRONG_MESSAGE_FORMAT;
-				break;
-			}
-			req = m_parms[0].info[0];
-			plci->command = _MANUFACTURER_R;
-			plci->m_command = command;
-			plci->number = Number;
-			if (req == CALL_REQ)
-			{
-				plci->b_channel = getChannel(&m_parms[1]);
-				mixer_set_bchannel_id_esc(plci, plci->b_channel);
-				if (plci->spoofed_msg == SPOOFING_REQUIRED)
-				{
-					plci->spoofed_msg = CALL_REQ | AWAITING_MANUF_CON;
-					plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
-					plci->command = 0;
-					break;
-				}
-			}
-			else if (req == LAW_REQ)
-			{
-				plci->cr_enquiry = true;
-			}
-			add_ss(plci, FTY, &m_parms[1]);
-			sig_req(plci, req, 0);
-			send_req(plci);
-			if (req == HANGUP)
-			{
-				if (plci->NL.Id && !plci->nl_remove_id)
-				{
-					if (plci->channels)
-					{
-						for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
-						{
-							if ((a->ncci_plci[ncci] == plci->Id) && (a->ncci_state[ncci] == CONNECTED))
-							{
-								a->ncci_state[ncci] = OUTG_DIS_PENDING;
-								cleanup_ncci_data(plci, ncci);
-								nl_req_ncci(plci, N_DISC, (byte)ncci);
-							}
-						}
-					}
-					mixer_remove(plci);
-					nl_req_ncci(plci, REMOVE, 0);
-					send_req(plci);
-				}
-			}
-			break;
-
-		case _DI_SIG_CTRL:
-			/* signalling control for loop activation B-channel */
-			if (!plci)
-			{
-				Info = _WRONG_IDENTIFIER;
-				break;
-			}
-			if (m->length) {
-				plci->command = _MANUFACTURER_R;
-				plci->number = Number;
-				add_ss(plci, FTY, m);
-				sig_req(plci, SIG_CTRL, 0);
-				send_req(plci);
-			}
-			else Info = _WRONG_MESSAGE_FORMAT;
-			break;
-
-		case _DI_RXT_CTRL:
-			/* activation control for receiver/transmitter B-channel */
-			if (!plci)
-			{
-				Info = _WRONG_IDENTIFIER;
-				break;
-			}
-			if (m->length) {
-				plci->command = _MANUFACTURER_R;
-				plci->number = Number;
-				add_ss(plci, FTY, m);
-				sig_req(plci, DSP_CTRL, 0);
-				send_req(plci);
-			}
-			else Info = _WRONG_MESSAGE_FORMAT;
-			break;
-
-		case _DI_ADV_CODEC:
-		case _DI_DSP_CTRL:
-			/* TEL_CTRL commands to support non standard adjustments: */
-			/* Ring on/off, Handset micro volume, external micro vol. */
-			/* handset+external speaker volume, receiver+transm. gain,*/
-			/* handsfree on (hookinfo off), set mixer command         */
-
-			if (command == _DI_ADV_CODEC)
-			{
-				if (!a->AdvCodecPLCI) {
-					Info = _WRONG_STATE;
-					break;
-				}
-				v_plci = a->AdvCodecPLCI;
-			}
-			else
-			{
-				if (plci
-				    && (m->length >= 3)
-				    && (m->info[1] == 0x1c)
-				    && (m->info[2] >= 1))
-				{
-					if (m->info[3] == DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS)
-					{
-						if ((plci->tel != ADV_VOICE) || (plci != a->AdvSignalPLCI))
-						{
-							Info = _WRONG_STATE;
-							break;
-						}
-						a->adv_voice_coef_length = m->info[2] - 1;
-						if (a->adv_voice_coef_length > m->length - 3)
-							a->adv_voice_coef_length = (byte)(m->length - 3);
-						if (a->adv_voice_coef_length > ADV_VOICE_COEF_BUFFER_SIZE)
-							a->adv_voice_coef_length = ADV_VOICE_COEF_BUFFER_SIZE;
-						for (i = 0; i < a->adv_voice_coef_length; i++)
-							a->adv_voice_coef_buffer[i] = m->info[4 + i];
-						if (plci->B1_facilities & B1_FACILITY_VOICE)
-							adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
-						break;
-					}
-					else if (m->info[3] == DSP_CTRL_SET_DTMF_PARAMETERS)
-					{
-						if (!(a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_PARAMETERS))
-						{
-							Info = _FACILITY_NOT_SUPPORTED;
-							break;
-						}
-
-						plci->dtmf_parameter_length = m->info[2] - 1;
-						if (plci->dtmf_parameter_length > m->length - 3)
-							plci->dtmf_parameter_length = (byte)(m->length - 3);
-						if (plci->dtmf_parameter_length > DTMF_PARAMETER_BUFFER_SIZE)
-							plci->dtmf_parameter_length = DTMF_PARAMETER_BUFFER_SIZE;
-						for (i = 0; i < plci->dtmf_parameter_length; i++)
-							plci->dtmf_parameter_buffer[i] = m->info[4 + i];
-						if (plci->B1_facilities & B1_FACILITY_DTMFR)
-							dtmf_parameter_write(plci);
-						break;
-
-					}
-				}
-				v_plci = plci;
-			}
-
-			if (!v_plci)
-			{
-				Info = _WRONG_IDENTIFIER;
-				break;
-			}
-			if (m->length) {
-				add_ss(v_plci, FTY, m);
-				sig_req(v_plci, TEL_CTRL, 0);
-				send_req(v_plci);
-			}
-			else Info = _WRONG_MESSAGE_FORMAT;
-
-			break;
-
-		case _DI_OPTIONS_REQUEST:
-			if (api_parse(&m->info[1], (word)m->length, "d", m_parms)) {
-				Info = _WRONG_MESSAGE_FORMAT;
-				break;
-			}
-			if (GET_DWORD(m_parms[0].info) & ~a->man_profile.private_options)
-			{
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			a->requested_options_table[appl->Id - 1] = GET_DWORD(m_parms[0].info);
-			break;
-
-
-
-		default:
-			Info = _WRONG_MESSAGE_FORMAT;
-			break;
-		}
-	}
-
-	sendf(appl,
-	      _MANUFACTURER_R | CONFIRM,
-	      Id,
-	      Number,
-	      "dww", _DI_MANU_ID, command, Info);
-	return false;
-}
-
-
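-/* Handle a manufacturer-specific response message from the application;    */
-/* only the _DI_NEGOTIATE_B3 indication is evaluated here.                  */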
-static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
-			     PLCI *plci, APPL *appl, API_PARSE *msg)
-{
-	word indication;
-
-	API_PARSE m_parms[3];
-	API_PARSE *ncpi;
-	API_PARSE fax_parms[9];
-	word i;
-	byte len;
-
-
-	dbug(1, dprintf("manufacturer_res"));
-
-	if ((msg[0].length == 0)
-	    || (msg[1].length == 0)
-	    || (GET_DWORD(msg[0].info) != _DI_MANU_ID))
-	{
-		return false;
-	}
-	indication = GET_WORD(msg[1].info);
-	switch (indication)
-	{
-
-	case _DI_NEGOTIATE_B3:
-		if (!plci)
-			break;
-		if (((plci->B3_prot != 4) && (plci->B3_prot != 5))
-		    || !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
-		{
-			dbug(1, dprintf("wrong state for NEGOTIATE_B3 parameters"));
-			break;
-		}
-		if (api_parse(&msg[2].info[1], msg[2].length, "ws", m_parms))
-		{
-			dbug(1, dprintf("wrong format in NEGOTIATE_B3 parameters"));
-			break;
-		}
-		ncpi = &m_parms[1];
-		len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
-		if (plci->fax_connect_info_length < len)
-		{
-			((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
-			((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
-		}
-		if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
-		{
-			dbug(1, dprintf("non-standard facilities info missing or wrong format"));
-		}
-		else
-		{
-			if (plci->fax_connect_info_length <= len)
-				plci->fax_connect_info_buffer[len] = 0;
-			len += 1 + plci->fax_connect_info_buffer[len];
-			if (plci->fax_connect_info_length <= len)
-				plci->fax_connect_info_buffer[len] = 0;
-			len += 1 + plci->fax_connect_info_buffer[len];
-			if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
-				plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
-			plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
-			for (i = 0; i < fax_parms[7].length; i++)
-				plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
-		}
-		plci->fax_connect_info_length = len;
-		plci->fax_edata_ack_length = plci->fax_connect_info_length;
-		start_internal_command(Id, plci, fax_edata_ack_command);
-		break;
-
-	}
-	return false;
-}
-
-/*------------------------------------------------------------------*/
-/* IDI callback function                                            */
-/*------------------------------------------------------------------*/
-
-void callback(ENTITY *e)
-{
-	DIVA_CAPI_ADAPTER *a;
-	APPL *appl;
-	PLCI *plci;
-	CAPI_MSG *m;
-	word i, j;
-	byte rc;
-	byte ch;
-	byte req;
-	byte global_req;
-	int no_cancel_rc;
-
-	dbug(1, dprintf("%x:CB(%x:Req=%x,Rc=%x,Ind=%x)",
-			(e->user[0] + 1) & 0x7fff, e->Id, e->Req, e->Rc, e->Ind));
-
-	a = &(adapter[(byte)e->user[0]]);
-	plci = &(a->plci[e->user[1]]);
-	no_cancel_rc = DIVA_CAPI_SUPPORTS_NO_CANCEL(a);
-
-	/*
-	  If the new protocol code and new XDI are used then CAPI should work
-	  fully in accordance with the IDI spec and look at the callback field
-	  instead of the Rc field for return codes.
-	*/
-	if (((e->complete == 0xff) && no_cancel_rc) ||
-	    (e->Rc && !no_cancel_rc)) {
-		rc = e->Rc;
-		ch = e->RcCh;
-		req = e->Req;
-		e->Rc = 0;
-
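-		/*
-		  Entities with bit 15 set in user[0] are network layer (NL)
-		  entities, the others are signaling (SIG) entities; the return
-		  code is therefore routed to the NL or SIG handling below.
-		*/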
-		if (e->user[0] & 0x8000)
-		{
-			/*
-			  If a REMOVE request was sent then we have to wait until
-			  a return code with Id set to zero arrives.
-			  All other return codes should be ignored.
-			*/
-			if (req == REMOVE)
-			{
-				if (e->Id)
-				{
-					dbug(1, dprintf("cancel RC in REMOVE state"));
-					return;
-				}
-				channel_flow_control_remove(plci);
-				for (i = 0; i < 256; i++)
-				{
-					if (a->FlowControlIdTable[i] == plci->nl_remove_id)
-						a->FlowControlIdTable[i] = 0;
-				}
-				plci->nl_remove_id = 0;
-				if (plci->rx_dma_descriptor > 0) {
-					diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
-					plci->rx_dma_descriptor = 0;
-				}
-			}
-			if (rc == OK_FC)
-			{
-				a->FlowControlIdTable[ch] = e->Id;
-				a->FlowControlSkipTable[ch] = 0;
-
-				a->ch_flow_control[ch] |= N_OK_FC_PENDING;
-				a->ch_flow_plci[ch] = plci->Id;
-				plci->nl_req = 0;
-			}
-			else
-			{
-				/*
-				  Cancel the return code ourselves if the feature was requested
-				*/
-				if (no_cancel_rc && (a->FlowControlIdTable[ch] == e->Id) && e->Id) {
-					a->FlowControlIdTable[ch] = 0;
-					if ((rc == OK) && a->FlowControlSkipTable[ch]) {
-						dbug(3, dprintf("XDI CAPI: RC cancelled Id:%02x, Ch:%02x", e->Id, ch));
-						return;
-					}
-				}
-
-				if (a->ch_flow_control[ch] & N_OK_FC_PENDING)
-				{
-					a->ch_flow_control[ch] &= ~N_OK_FC_PENDING;
-					if (ch == e->ReqCh)
-						plci->nl_req = 0;
-				}
-				else
-					plci->nl_req = 0;
-			}
-			if (plci->nl_req)
-				control_rc(plci, 0, rc, ch, 0, true);
-			else
-			{
-				if (req == N_XON)
-				{
-					channel_x_on(plci, ch);
-					if (plci->internal_command)
-						control_rc(plci, req, rc, ch, 0, true);
-				}
-				else
-				{
-					if (plci->nl_global_req)
-					{
-						global_req = plci->nl_global_req;
-						plci->nl_global_req = 0;
-						if (rc != ASSIGN_OK) {
-							e->Id = 0;
-							if (plci->rx_dma_descriptor > 0) {
-								diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
-								plci->rx_dma_descriptor = 0;
-							}
-						}
-						channel_xmit_xon(plci);
-						control_rc(plci, 0, rc, ch, global_req, true);
-					}
-					else if (plci->data_sent)
-					{
-						channel_xmit_xon(plci);
-						plci->data_sent = false;
-						plci->NL.XNum = 1;
-						data_rc(plci, ch);
-						if (plci->internal_command)
-							control_rc(plci, req, rc, ch, 0, true);
-					}
-					else
-					{
-						channel_xmit_xon(plci);
-						control_rc(plci, req, rc, ch, 0, true);
-					}
-				}
-			}
-		}
-		else
-		{
-			/*
-			  If a REMOVE request was sent then we have to wait until
-			  a return code with Id set to zero arrives.
-			  All other return codes should be ignored.
-			*/
-			if (req == REMOVE)
-			{
-				if (e->Id)
-				{
-					dbug(1, dprintf("cancel RC in REMOVE state"));
-					return;
-				}
-				plci->sig_remove_id = 0;
-			}
-			plci->sig_req = 0;
-			if (plci->sig_global_req)
-			{
-				global_req = plci->sig_global_req;
-				plci->sig_global_req = 0;
-				if (rc != ASSIGN_OK)
-					e->Id = 0;
-				channel_xmit_xon(plci);
-				control_rc(plci, 0, rc, ch, global_req, false);
-			}
-			else
-			{
-				channel_xmit_xon(plci);
-				control_rc(plci, req, rc, ch, 0, false);
-			}
-		}
-		/*
-		  Again: in accordance with the IDI spec, Rc and Ind can't be delivered
-		  in the same callback. Also, if the new XDI and protocol code are used,
-		  jump directly to the finish.
-		*/
-		if (no_cancel_rc) {
-			channel_xmit_xon(plci);
-			goto capi_callback_suffix;
-		}
-	}
-
-	channel_xmit_xon(plci);
-
-	if (e->Ind) {
-		if (e->user[0] & 0x8000) {
-			byte Ind = e->Ind & 0x0f;
-			byte Ch = e->IndCh;
-			if (((Ind == N_DISC) || (Ind == N_DISC_ACK)) &&
-			    (a->ch_flow_plci[Ch] == plci->Id)) {
-				if (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK) {
-					dbug(3, dprintf("XDI CAPI: I: pending N-XON Ch:%02x", Ch));
-				}
-				a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
-			}
-			nl_ind(plci);
-			if ((e->RNR != 1) &&
-			    (a->ch_flow_plci[Ch] == plci->Id) &&
-			    (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK)) {
-				a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
-				dbug(3, dprintf("XDI CAPI: I: remove faked N-XON Ch:%02x", Ch));
-			}
-		} else {
-			sig_ind(plci);
-		}
-		e->Ind = 0;
-	}
-
-capi_callback_suffix:
-
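-	/* dequeue buffered CAPI messages for this PLCI as long as no request  */
-	/* and no internal command is outstanding                              */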
-	while (!plci->req_in
-	       && !plci->internal_command
-	       && (plci->msg_in_write_pos != plci->msg_in_read_pos))
-	{
-		j = (plci->msg_in_read_pos == plci->msg_in_wrap_pos) ? 0 : plci->msg_in_read_pos;
-
-		i = (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]))->header.length + 3) & 0xfffc;
-
-		m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
-		appl = *((APPL **)(&((byte *)(plci->msg_in_queue))[j + i]));
-		dbug(1, dprintf("dequeue msg(0x%04x) - write=%d read=%d wrap=%d",
-				m->header.command, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos));
-		if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
-		{
-			plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-			plci->msg_in_read_pos = i + MSG_IN_OVERHEAD;
-		}
-		else
-		{
-			plci->msg_in_read_pos = j + i + MSG_IN_OVERHEAD;
-		}
-		if (plci->msg_in_read_pos == plci->msg_in_write_pos)
-		{
-			plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
-			plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
-		}
-		else if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
-		{
-			plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
-			plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-		}
-		i = api_put(appl, m);
-		if (i != 0)
-		{
-			if (m->header.command == _DATA_B3_R)
-
-				TransmitBufferFree(appl, (byte *)(long)(m->info.data_b3_req.Data));
-
-			dbug(1, dprintf("Error 0x%04x from msg(0x%04x)", i, m->header.command));
-			break;
-		}
-
-		if (plci->li_notify_update)
-		{
-			plci->li_notify_update = false;
-			mixer_notify_update(plci, false);
-		}
-
-	}
-	send_data(plci);
-	send_req(plci);
-}
-
-
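-/* Dispatch a return code for the pending SIG or NL request of a PLCI:      */
-/* confirm the outstanding CAPI command or drive the pending internal       */
-/* command state machine, depending on which one is active.                 */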
-static void control_rc(PLCI *plci, byte req, byte rc, byte ch, byte global_req,
-		       byte nl_rc)
-{
-	dword Id;
-	dword rId;
-	word Number;
-	word Info = 0;
-	word i;
-	word ncci;
-	DIVA_CAPI_ADAPTER *a;
-	APPL *appl;
-	PLCI *rplci;
-	byte SSparms[] = "\x05\x00\x00\x02\x00\x00";
-	byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
-
-	if (!plci) {
-		dbug(0, dprintf("A: control_rc, no plci %02x:%02x:%02x:%02x:%02x", req, rc, ch, global_req, nl_rc));
-		return;
-	}
-	dbug(1, dprintf("req0_in/out=%d/%d", plci->req_in, plci->req_out));
-	if (plci->req_in != plci->req_out)
-	{
-		if (nl_rc || (global_req != ASSIGN) || (rc == ASSIGN_OK))
-		{
-			dbug(1, dprintf("req_1return"));
-			return;
-		}
-		/* cancel outstanding request on the PLCI after SIG ASSIGN failure */
-	}
-	plci->req_in = plci->req_in_start = plci->req_out = 0;
-	dbug(1, dprintf("control_rc"));
-
-	appl = plci->appl;
-	a = plci->adapter;
-	ncci = a->ch_ncci[ch];
-	if (appl)
-	{
-		Id = (((dword)(ncci ? ncci : ch)) << 16) | ((word)plci->Id << 8) | a->Id;
-		if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
-		Number = plci->number;
-		dbug(1, dprintf("Contr_RC-Id=%08lx,plci=%x,tel=%x, entity=0x%x, command=0x%x, int_command=0x%x", Id, plci->Id, plci->tel, plci->Sig.Id, plci->command, plci->internal_command));
-		dbug(1, dprintf("channels=0x%x", plci->channels));
-		if (plci_remove_check(plci))
-			return;
-		if (req == REMOVE && rc == ASSIGN_OK)
-		{
-			sig_req(plci, HANGUP, 0);
-			sig_req(plci, REMOVE, 0);
-			send_req(plci);
-		}
-		if (plci->command)
-		{
-			switch (plci->command)
-			{
-			case C_HOLD_REQ:
-				dbug(1, dprintf("HoldRC=0x%x", rc));
-				SSparms[1] = (byte)S_HOLD;
-				if (rc != OK)
-				{
-					plci->SuppState = IDLE;
-					Info = 0x2001;
-				}
-				sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
-				break;
-
-			case C_RETRIEVE_REQ:
-				dbug(1, dprintf("RetrieveRC=0x%x", rc));
-				SSparms[1] = (byte)S_RETRIEVE;
-				if (rc != OK)
-				{
-					plci->SuppState = CALL_HELD;
-					Info = 0x2001;
-				}
-				sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
-				break;
-
-			case _INFO_R:
-				dbug(1, dprintf("InfoRC=0x%x", rc));
-				if (rc != OK) Info = _WRONG_STATE;
-				sendf(appl, _INFO_R | CONFIRM, Id, Number, "w", Info);
-				break;
-
-			case _CONNECT_R:
-				dbug(1, dprintf("Connect_R=0x%x/0x%x/0x%x/0x%x", req, rc, global_req, nl_rc));
-				if (plci->State == INC_DIS_PENDING)
-					break;
-				if (plci->Sig.Id != 0xff)
-				{
-					if (((global_req == ASSIGN) && (rc != ASSIGN_OK))
-					    || (!nl_rc && (req == CALL_REQ) && (rc != OK)))
-					{
-						dbug(1, dprintf("No more IDs/Call_Req failed"));
-						sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
-						plci_remove(plci);
-						plci->State = IDLE;
-						break;
-					}
-					if (plci->State != LOCAL_CONNECT) plci->State = OUTG_CON_PENDING;
-					sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
-				}
-				else /* D-ch activation */
-				{
-					if (rc != ASSIGN_OK)
-					{
-						dbug(1, dprintf("No more IDs/X.25 Call_Req failed"));
-						sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
-						plci_remove(plci);
-						plci->State = IDLE;
-						break;
-					}
-					sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
-					sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "sss", "", "", "");
-					plci->State = INC_ACT_PENDING;
-				}
-				break;
-
-			case _CONNECT_I | RESPONSE:
-				if (plci->State != INC_DIS_PENDING)
-					plci->State = INC_CON_ACCEPT;
-				break;
-
-			case _DISCONNECT_R:
-				if (plci->State == INC_DIS_PENDING)
-					break;
-				if (plci->Sig.Id != 0xff)
-				{
-					plci->State = OUTG_DIS_PENDING;
-					sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
-				}
-				break;
-
-			case SUSPEND_REQ:
-				break;
-
-			case RESUME_REQ:
-				break;
-
-			case _CONNECT_B3_R:
-				if (rc != OK)
-				{
-					sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
-					break;
-				}
-				ncci = get_ncci(plci, ch, 0);
-				Id = (Id & 0xffff) | (((dword) ncci) << 16);
-				plci->channels++;
-				if (req == N_RESET)
-				{
-					a->ncci_state[ncci] = INC_ACT_PENDING;
-					sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
-					sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-				}
-				else
-				{
-					a->ncci_state[ncci] = OUTG_CON_PENDING;
-					sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
-				}
-				break;
-
-			case _CONNECT_B3_I | RESPONSE:
-				break;
-
-			case _RESET_B3_R:
-/*        sendf(appl, _RESET_B3_R | CONFIRM, Id, Number, "w", 0);*/
-				break;
-
-			case _DISCONNECT_B3_R:
-				sendf(appl, _DISCONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
-				break;
-
-			case _MANUFACTURER_R:
-				break;
-
-			case PERM_LIST_REQ:
-				if (rc != OK)
-				{
-					Info = _WRONG_IDENTIFIER;
-					sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
-					plci_remove(plci);
-				}
-				else
-					sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
-				break;
-
-			default:
-				break;
-			}
-			plci->command = 0;
-		}
-		else if (plci->internal_command)
-		{
-			switch (plci->internal_command)
-			{
-			case BLOCK_PLCI:
-				return;
-
-			case GET_MWI_STATE:
-				if (rc == OK) /* command supported, wait for indication */
-				{
-					return;
-				}
-				plci_remove(plci);
-				break;
-
-				/* Get Supported Services */
-			case GETSERV_REQ_PEND:
-				if (rc == OK) /* command supported, wait for indication */
-				{
-					break;
-				}
-				PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
-				sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", 0, 3, SSstruct);
-				plci_remove(plci);
-				break;
-
-			case INTERR_DIVERSION_REQ_PEND:      /* Interrogate Parameters        */
-			case INTERR_NUMBERS_REQ_PEND:
-			case CF_START_PEND:                  /* Call Forwarding Start pending */
-			case CF_STOP_PEND:                   /* Call Forwarding Stop pending  */
-			case CCBS_REQUEST_REQ_PEND:
-			case CCBS_DEACTIVATE_REQ_PEND:
-			case CCBS_INTERROGATE_REQ_PEND:
-				switch (plci->internal_command)
-				{
-				case INTERR_DIVERSION_REQ_PEND:
-					SSparms[1] = S_INTERROGATE_DIVERSION;
-					break;
-				case INTERR_NUMBERS_REQ_PEND:
-					SSparms[1] = S_INTERROGATE_NUMBERS;
-					break;
-				case CF_START_PEND:
-					SSparms[1] = S_CALL_FORWARDING_START;
-					break;
-				case CF_STOP_PEND:
-					SSparms[1] = S_CALL_FORWARDING_STOP;
-					break;
-				case CCBS_REQUEST_REQ_PEND:
-					SSparms[1] = S_CCBS_REQUEST;
-					break;
-				case CCBS_DEACTIVATE_REQ_PEND:
-					SSparms[1] = S_CCBS_DEACTIVATE;
-					break;
-				case CCBS_INTERROGATE_REQ_PEND:
-					SSparms[1] = S_CCBS_INTERROGATE;
-					break;
-				}
-				if (global_req == ASSIGN)
-				{
-					dbug(1, dprintf("AssignDiversion_RC=0x%x/0x%x", req, rc));
-					return;
-				}
-				if (!plci->appl) break;
-				if (rc == ISDN_GUARD_REJ)
-				{
-					Info = _CAPI_GUARD_ERROR;
-				}
-				else if (rc != OK)
-				{
-					Info = _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED;
-				}
-				sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7,
-				      plci->number, "wws", Info, (word)3, SSparms);
-				if (Info) plci_remove(plci);
-				break;
-
-				/* 3pty conference pending */
-			case PTY_REQ_PEND:
-				if (!plci->relatedPTYPLCI) break;
-				rplci = plci->relatedPTYPLCI;
-				SSparms[1] = plci->ptyState;
-				rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
-				if (rplci->tel) rId |= EXT_CONTROLLER;
-				if (rc != OK)
-				{
-					Info = 0x300E; /* not supported */
-					plci->relatedPTYPLCI = NULL;
-					plci->ptyState = 0;
-				}
-				sendf(rplci->appl,
-				      _FACILITY_R | CONFIRM,
-				      rId,
-				      plci->number,
-				      "wws", Info, (word)3, SSparms);
-				break;
-
-				/* Explicit Call Transfer pending */
-			case ECT_REQ_PEND:
-				dbug(1, dprintf("ECT_RC=0x%x/0x%x", req, rc));
-				if (!plci->relatedPTYPLCI) break;
-				rplci = plci->relatedPTYPLCI;
-				SSparms[1] = S_ECT;
-				rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
-				if (rplci->tel) rId |= EXT_CONTROLLER;
-				if (rc != OK)
-				{
-					Info = 0x300E; /* not supported */
-					plci->relatedPTYPLCI = NULL;
-					plci->ptyState = 0;
-				}
-				sendf(rplci->appl,
-				      _FACILITY_R | CONFIRM,
-				      rId,
-				      plci->number,
-				      "wws", Info, (word)3, SSparms);
-				break;
-
-			case _MANUFACTURER_R:
-				dbug(1, dprintf("_Manufacturer_R=0x%x/0x%x", req, rc));
-				if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
-				{
-					dbug(1, dprintf("No more IDs"));
-					sendf(appl, _MANUFACTURER_R | CONFIRM, Id, Number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
-					plci_remove(plci);  /* after codec init, internal codec commands pending */
-				}
-				break;
-
-			case _CONNECT_R:
-				dbug(1, dprintf("_Connect_R=0x%x/0x%x", req, rc));
-				if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
-				{
-					dbug(1, dprintf("No more IDs"));
-					sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
-					plci_remove(plci);  /* after codec init, internal codec commands pending */
-				}
-				break;
-
-			case PERM_COD_HOOK:                     /* finished with Hook_Ind */
-				return;
-
-			case PERM_COD_CALL:
-				dbug(1, dprintf("***Codec Connect_Pending A, Rc = 0x%x", rc));
-				plci->internal_command = PERM_COD_CONN_PEND;
-				return;
-
-			case PERM_COD_ASSIGN:
-				dbug(1, dprintf("***Codec Assign A, Rc = 0x%x", rc));
-				if (rc != ASSIGN_OK) break;
-				sig_req(plci, CALL_REQ, 0);
-				send_req(plci);
-				plci->internal_command = PERM_COD_CALL;
-				return;
-
-				/* Null Call Reference Request pending */
-			case C_NCR_FAC_REQ:
-				dbug(1, dprintf("NCR_FAC=0x%x/0x%x", req, rc));
-				if (global_req == ASSIGN)
-				{
-					if (rc == ASSIGN_OK)
-					{
-						return;
-					}
-					else
-					{
-						sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
-						appl->NullCREnable = false;
-						plci_remove(plci);
-					}
-				}
-				else if (req == NCR_FACILITY)
-				{
-					if (rc == OK)
-					{
-						sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", 0);
-					}
-					else
-					{
-						sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
-						appl->NullCREnable = false;
-					}
-					plci_remove(plci);
-				}
-				break;
-
-			case HOOK_ON_REQ:
-				if (plci->channels)
-				{
-					if (a->ncci_state[ncci] == CONNECTED)
-					{
-						a->ncci_state[ncci] = OUTG_DIS_PENDING;
-						cleanup_ncci_data(plci, ncci);
-						nl_req_ncci(plci, N_DISC, (byte)ncci);
-					}
-					break;
-				}
-				break;
-
-			case HOOK_OFF_REQ:
-				if (plci->State == INC_DIS_PENDING)
-					break;
-				sig_req(plci, CALL_REQ, 0);
-				send_req(plci);
-				plci->State = OUTG_CON_PENDING;
-				break;
-
-
-			case MWI_ACTIVATE_REQ_PEND:
-			case MWI_DEACTIVATE_REQ_PEND:
-				if (global_req == ASSIGN && rc == ASSIGN_OK)
-				{
-					dbug(1, dprintf("MWI_REQ assigned"));
-					return;
-				}
-				else if (rc != OK)
-				{
-					if (rc == WRONG_IE)
-					{
-						Info = 0x2007; /* Illegal message parameter coding */
-						dbug(1, dprintf("MWI_REQ invalid parameter"));
-					}
-					else
-					{
-						Info = 0x300B; /* not supported */
-						dbug(1, dprintf("MWI_REQ not supported"));
-					}
-					/* 0x3010: Request not allowed in this state */
-					PUT_WORD(&SSparms[4], 0x300E); /* SS not supported */
-
-				}
-				if (plci->internal_command == MWI_ACTIVATE_REQ_PEND)
-				{
-					PUT_WORD(&SSparms[1], S_MWI_ACTIVATE);
-				}
-				else PUT_WORD(&SSparms[1], S_MWI_DEACTIVATE);
-
-				if (plci->cr_enquiry)
-				{
-					sendf(plci->appl,
-					      _FACILITY_R | CONFIRM,
-					      Id & 0xf,
-					      plci->number,
-					      "wws", Info, (word)3, SSparms);
-					if (rc != OK) plci_remove(plci);
-				}
-				else
-				{
-					sendf(plci->appl,
-					      _FACILITY_R | CONFIRM,
-					      Id,
-					      plci->number,
-					      "wws", Info, (word)3, SSparms);
-				}
-				break;
-
-			case CONF_BEGIN_REQ_PEND:
-			case CONF_ADD_REQ_PEND:
-			case CONF_SPLIT_REQ_PEND:
-			case CONF_DROP_REQ_PEND:
-			case CONF_ISOLATE_REQ_PEND:
-			case CONF_REATTACH_REQ_PEND:
-				dbug(1, dprintf("CONF_RC=0x%x/0x%x", req, rc));
-				if ((plci->internal_command == CONF_ADD_REQ_PEND) && (!plci->relatedPTYPLCI)) break;
-				rplci = plci;
-				rId = Id;
-				switch (plci->internal_command)
-				{
-				case CONF_BEGIN_REQ_PEND:
-					SSparms[1] = S_CONF_BEGIN;
-					break;
-				case CONF_ADD_REQ_PEND:
-					SSparms[1] = S_CONF_ADD;
-					rplci = plci->relatedPTYPLCI;
-					rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
-					break;
-				case CONF_SPLIT_REQ_PEND:
-					SSparms[1] = S_CONF_SPLIT;
-					break;
-				case CONF_DROP_REQ_PEND:
-					SSparms[1] = S_CONF_DROP;
-					break;
-				case CONF_ISOLATE_REQ_PEND:
-					SSparms[1] = S_CONF_ISOLATE;
-					break;
-				case CONF_REATTACH_REQ_PEND:
-					SSparms[1] = S_CONF_REATTACH;
-					break;
-				}
-
-				if (rc != OK)
-				{
-					Info = 0x300E; /* not supported */
-					plci->relatedPTYPLCI = NULL;
-					plci->ptyState = 0;
-				}
-				sendf(rplci->appl,
-				      _FACILITY_R | CONFIRM,
-				      rId,
-				      plci->number,
-				      "wws", Info, (word)3, SSparms);
-				break;
-
-			case VSWITCH_REQ_PEND:
-				if (rc != OK)
-				{
-					if (plci->relatedPTYPLCI)
-					{
-						plci->relatedPTYPLCI->vswitchstate = 0;
-						plci->relatedPTYPLCI->vsprot = 0;
-						plci->relatedPTYPLCI->vsprotdialect = 0;
-					}
-					plci->vswitchstate = 0;
-					plci->vsprot = 0;
-					plci->vsprotdialect = 0;
-				}
-				else
-				{
-					if (plci->relatedPTYPLCI &&
-					    plci->vswitchstate == 1 &&
-					    plci->relatedPTYPLCI->vswitchstate == 3) /* join complete */
-						plci->vswitchstate = 3;
-				}
-				break;
-
-				/* Call Deflection Request pending (SSCT) */
-			case CD_REQ_PEND:
-				SSparms[1] = S_CALL_DEFLECTION;
-				if (rc != OK)
-				{
-					Info = 0x300E; /* not supported */
-					plci->appl->CDEnable = 0;
-				}
-				sendf(plci->appl, _FACILITY_R | CONFIRM, Id,
-				      plci->number, "wws", Info, (word)3, SSparms);
-				break;
-
-			case RTP_CONNECT_B3_REQ_COMMAND_2:
-				if (rc == OK)
-				{
-					ncci = get_ncci(plci, ch, 0);
-					Id = (Id & 0xffff) | (((dword) ncci) << 16);
-					plci->channels++;
-					a->ncci_state[ncci] = OUTG_CON_PENDING;
-				}
-				/* fall through */
-
-			default:
-				if (plci->internal_command_queue[0])
-				{
-					(*(plci->internal_command_queue[0]))(Id, plci, rc);
-					if (plci->internal_command)
-						return;
-				}
-				break;
-			}
-			next_internal_command(Id, plci);
-		}
-	}
-	else /* appl==0 */
-	{
-		Id = ((word)plci->Id << 8) | plci->adapter->Id;
-		if (plci->tel) Id |= EXT_CONTROLLER;
-
-		switch (plci->internal_command)
-		{
-		case BLOCK_PLCI:
-			return;
-
-		case START_L1_SIG_ASSIGN_PEND:
-		case REM_L1_SIG_ASSIGN_PEND:
-			if (global_req == ASSIGN)
-			{
-				break;
-			}
-			else
-			{
-				dbug(1, dprintf("***L1 Req rem PLCI"));
-				plci->internal_command = 0;
-				sig_req(plci, REMOVE, 0);
-				send_req(plci);
-			}
-			break;
-
-			/* Call Deflection Request pending, just no appl ptr assigned */
-		case CD_REQ_PEND:
-			SSparms[1] = S_CALL_DEFLECTION;
-			if (rc != OK)
-			{
-				Info = 0x300E; /* not supported */
-			}
-			for (i = 0; i < max_appl; i++)
-			{
-				if (application[i].CDEnable)
-				{
-					if (!application[i].Id) application[i].CDEnable = 0;
-					else
-					{
-						sendf(&application[i], _FACILITY_R | CONFIRM, Id,
-						      plci->number, "wws", Info, (word)3, SSparms);
-						if (Info) application[i].CDEnable = 0;
-					}
-				}
-			}
-			plci->internal_command = 0;
-			break;
-
-		case PERM_COD_HOOK:                   /* finished with Hook_Ind */
-			return;
-
-		case PERM_COD_CALL:
-			plci->internal_command = PERM_COD_CONN_PEND;
-			dbug(1, dprintf("***Codec Connect_Pending, Rc = 0x%x", rc));
-			return;
-
-		case PERM_COD_ASSIGN:
-			dbug(1, dprintf("***Codec Assign, Rc = 0x%x", rc));
-			plci->internal_command = 0;
-			if (rc != ASSIGN_OK) break;
-			plci->internal_command = PERM_COD_CALL;
-			sig_req(plci, CALL_REQ, 0);
-			send_req(plci);
-			return;
-
-		case LISTEN_SIG_ASSIGN_PEND:
-			if (rc == ASSIGN_OK)
-			{
-				plci->internal_command = 0;
-				dbug(1, dprintf("ListenCheck, new SIG_ID = 0x%x", plci->Sig.Id));
-				add_p(plci, ESC, "\x02\x18\x00");             /* support call waiting */
-				sig_req(plci, INDICATE_REQ, 0);
-				send_req(plci);
-			}
-			else
-			{
-				dbug(1, dprintf("ListenCheck failed (assignRc=0x%x)", rc));
-				a->listen_active--;
-				plci_remove(plci);
-				plci->State = IDLE;
-			}
-			break;
-
-		case USELAW_REQ:
-			if (global_req == ASSIGN)
-			{
-				if (rc == ASSIGN_OK)
-				{
-					sig_req(plci, LAW_REQ, 0);
-					send_req(plci);
-					dbug(1, dprintf("Auto-Law assigned"));
-				}
-				else
-				{
-					dbug(1, dprintf("Auto-Law assign failed"));
-					a->automatic_law = 3;
-					plci->internal_command = 0;
-					a->automatic_lawPLCI = NULL;
-				}
-				break;
-			}
-			else if (req == LAW_REQ && rc == OK)
-			{
-				dbug(1, dprintf("Auto-Law initiated"));
-				a->automatic_law = 2;
-				plci->internal_command = 0;
-			}
-			else
-			{
-				dbug(1, dprintf("Auto-Law not supported"));
-				a->automatic_law = 3;
-				plci->internal_command = 0;
-				sig_req(plci, REMOVE, 0);
-				send_req(plci);
-				a->automatic_lawPLCI = NULL;
-			}
-			break;
-		}
-		plci_remove_check(plci);
-	}
-}
-
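-/* Free the transmit buffer of a sent DATA_B3 request and, if the NCCI is   */
-/* still active and bit 2 of the descriptor flags is clear, confirm the     */
-/* oldest pending DATA_B3 descriptor to the application.                    */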
-static void data_rc(PLCI *plci, byte ch)
-{
-	dword Id;
-	DIVA_CAPI_ADAPTER *a;
-	NCCI *ncci_ptr;
-	DATA_B3_DESC *data;
-	word ncci;
-
-	if (plci->appl)
-	{
-		TransmitBufferFree(plci->appl, plci->data_sent_ptr);
-		a = plci->adapter;
-		ncci = a->ch_ncci[ch];
-		if (ncci && (a->ncci_plci[ncci] == plci->Id))
-		{
-			ncci_ptr = &(a->ncci[ncci]);
-			dbug(1, dprintf("data_out=%d, data_pending=%d", ncci_ptr->data_out, ncci_ptr->data_pending));
-			if (ncci_ptr->data_pending)
-			{
-				data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
-				if (!(data->Flags & 4) && a->ncci_state[ncci])
-				{
-					Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
-					if (plci->tel) Id |= EXT_CONTROLLER;
-					sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, data->Number,
-					      "ww", data->Handle, 0);
-				}
-				(ncci_ptr->data_out)++;
-				if (ncci_ptr->data_out == MAX_DATA_B3)
-					ncci_ptr->data_out = 0;
-				(ncci_ptr->data_pending)--;
-			}
-		}
-	}
-}
-
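-/* Pop the oldest entry from the NCCI's data-ack queue and send the         */
-/* corresponding DATA_B3 confirmation to the application.                   */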
-static void data_ack(PLCI *plci, byte ch)
-{
-	dword Id;
-	DIVA_CAPI_ADAPTER *a;
-	NCCI *ncci_ptr;
-	word ncci;
-
-	a = plci->adapter;
-	ncci = a->ch_ncci[ch];
-	ncci_ptr = &(a->ncci[ncci]);
-	if (ncci_ptr->data_ack_pending)
-	{
-		if (a->ncci_state[ncci] && (a->ncci_plci[ncci] == plci->Id))
-		{
-			Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
-			if (plci->tel) Id |= EXT_CONTROLLER;
-			sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, ncci_ptr->DataAck[ncci_ptr->data_ack_out].Number,
-			      "ww", ncci_ptr->DataAck[ncci_ptr->data_ack_out].Handle, 0);
-		}
-		(ncci_ptr->data_ack_out)++;
-		if (ncci_ptr->data_ack_out == MAX_DATA_ACK)
-			ncci_ptr->data_ack_out = 0;
-		(ncci_ptr->data_ack_pending)--;
-	}
-}
-
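-/* Handle a signaling (SIG) indication for the PLCI: parse the received     */
-/* information elements and translate them into the corresponding CAPI      */
-/* indications and facility messages.                                       */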
-static void sig_ind(PLCI *plci)
-{
-	dword x_Id;
-	dword Id;
-	dword rId;
-	word i;
-	word cip;
-	dword cip_mask;
-	byte *ie;
-	DIVA_CAPI_ADAPTER *a;
-	API_PARSE saved_parms[MAX_MSG_PARMS + 1];
-#define MAXPARMSIDS 31
-	byte *parms[MAXPARMSIDS];
-	byte *add_i[4];
-	byte *multi_fac_parms[MAX_MULTI_IE];
-	byte *multi_pi_parms[MAX_MULTI_IE];
-	byte *multi_ssext_parms[MAX_MULTI_IE];
-	byte *multi_CiPN_parms[MAX_MULTI_IE];
-
-	byte *multi_vswitch_parms[MAX_MULTI_IE];
-
-	byte ai_len;
-	byte *esc_chi = "";
-	byte *esc_law = "";
-	byte *pty_cai = "";
-	byte *esc_cr  = "";
-	byte *esc_profile = "";
-
-	byte facility[256];
-	PLCI *tplci = NULL;
-	byte chi[] = "\x02\x18\x01";
-	byte voice_cai[]  = "\x06\x14\x00\x00\x00\x00\x08";
-	byte resume_cau[] = "\x05\x05\x00\x02\x00\x00";
-	/* ESC_MSGTYPE must be the last entry but one; a new IE has to be */
-	/* included before ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
-	/* SMSG is situated at the end because it is 0 (for compatibility */
-	/* reasons; see Info_Mask Bit 4: first the IE, then the message type) */
-	static const word parms_id[] =
-		{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
-		 UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
-		 RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
-		 CST, ESC_PROFILE, 0xff, ESC_MSGTYPE, SMSG};
-	/* 14 FTY repl by ESC_CHI */
-	/* 18 PI  repl by ESC_LAW */
-	/* removed OAD changed to 0xff for future use, OAD is multiIE now */
-	static const word multi_fac_id[] = {1, FTY};
-	static const word multi_pi_id[]  = {1, PI};
-	static const word multi_CiPN_id[]  = {1, OAD};
-	static const word multi_ssext_id[]  = {1, ESC_SSEXT};
-
-	static const word multi_vswitch_id[]  = {1, ESC_VSWITCH};
-
-	byte *cau;
-	word ncci;
-	byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
-	byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
-	byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
-	byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
-	byte force_mt_info = false;
-	byte dir;
-	dword d;
-	word w;
-
-	a = plci->adapter;
-	Id = ((word)plci->Id << 8) | a->Id;
-	PUT_WORD(&SS_Ind[4], 0x0000);
-
-	if (plci->sig_remove_id)
-	{
-		plci->Sig.RNR = 2; /* discard */
-		dbug(1, dprintf("SIG discard while remove pending"));
-		return;
-	}
-	if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
-	dbug(1, dprintf("SigInd-Id=%08lx,plci=%x,tel=%x,state=0x%x,channels=%d,Discflowcl=%d",
-			Id, plci->Id, plci->tel, plci->State, plci->channels, plci->hangup_flow_ctrl_timer));
-	if (plci->Sig.Ind == CALL_HOLD_ACK && plci->channels)
-	{
-		plci->Sig.RNR = 1;
-		return;
-	}
-	if (plci->Sig.Ind == HANGUP && plci->channels)
-	{
-		plci->Sig.RNR = 1;
-		plci->hangup_flow_ctrl_timer++;
-		/* recover the network layer after timeout */
-		if (plci->hangup_flow_ctrl_timer == 100)
-		{
-			dbug(1, dprintf("Exceptional disc"));
-			plci->Sig.RNR = 0;
-			plci->hangup_flow_ctrl_timer = 0;
-			for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
-			{
-				if (a->ncci_plci[ncci] == plci->Id)
-				{
-					cleanup_ncci_data(plci, ncci);
-					if (plci->channels)plci->channels--;
-					if (plci->appl)
-						sendf(plci->appl, _DISCONNECT_B3_I, (((dword) ncci) << 16) | Id, 0, "ws", 0, "");
-				}
-			}
-			if (plci->appl)
-				sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
-			plci_remove(plci);
-			plci->State = IDLE;
-		}
-		return;
-	}
-
-	/* first parse the info without the OAD, because the OAD will be converted */
-	/* first the multiple facility IE, then mult. progress ind.              */
-	/* then the parameters for the info_ind + conn_ind                       */
-	IndParse(plci, multi_fac_id, multi_fac_parms, MAX_MULTI_IE);
-	IndParse(plci, multi_pi_id, multi_pi_parms, MAX_MULTI_IE);
-	IndParse(plci, multi_ssext_id, multi_ssext_parms, MAX_MULTI_IE);
-
-	IndParse(plci, multi_vswitch_id, multi_vswitch_parms, MAX_MULTI_IE);
-
-	IndParse(plci, parms_id, parms, 0);
-	IndParse(plci, multi_CiPN_id, multi_CiPN_parms, MAX_MULTI_IE);
-	esc_chi  = parms[14];
-	esc_law  = parms[18];
-	pty_cai  = parms[24];
-	esc_cr   = parms[25];
-	esc_profile = parms[27];
-	if (esc_cr[0] && plci)
-	{
-		if (plci->cr_enquiry && plci->appl)
-		{
-			plci->cr_enquiry = false;
-			/* d = MANU_ID            */
-			/* w = m_command          */
-			/* b = total length       */
-			/* b = indication type    */
-			/* b = length of all IEs  */
-			/* b = IE1                */
-			/* S = IE1 length + cont. */
-			/* b = IE2                */
-			/* S = IE2 length + cont. */
-			sendf(plci->appl,
-			      _MANUFACTURER_I,
-			      Id,
-			      0,
-			      "dwbbbbSbS", _DI_MANU_ID, plci->m_command,
-			      2 + 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], plci->Sig.Ind, 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], ESC, esc_cr, ESC, esc_law);
-		}
-	}
-	/* create the additional info structure                                  */
-	add_i[1] = parms[15]; /* KEY of additional info */
-	add_i[2] = parms[11]; /* UUI of additional info */
-	ai_len = AddInfo(add_i, multi_fac_parms, esc_chi, facility);
-
-	/* the ESC_LAW indicates whether u-Law or a-Law is actually used by the */
-	/* card; the indication is returned by the card if requested via the    */
-	/* AutomaticLaw() function after driver init                            */
-	if (a->automatic_law < 4)
-	{
-		if (esc_law[0]) {
-			if (esc_law[2]) {
-				dbug(0, dprintf("u-Law selected"));
-				a->u_law = 1;
-			}
-			else {
-				dbug(0, dprintf("a-Law selected"));
-				a->u_law = 0;
-			}
-			a->automatic_law = 4;
-			if (plci == a->automatic_lawPLCI) {
-				plci->internal_command = 0;
-				sig_req(plci, REMOVE, 0);
-				send_req(plci);
-				a->automatic_lawPLCI = NULL;
-			}
-		}
-		if (esc_profile[0])
-		{
-			dbug(1, dprintf("[%06x] CardProfile: %lx %lx %lx %lx %lx",
-					UnMapController(a->Id), GET_DWORD(&esc_profile[6]),
-					GET_DWORD(&esc_profile[10]), GET_DWORD(&esc_profile[14]),
-					GET_DWORD(&esc_profile[18]), GET_DWORD(&esc_profile[46])));
-
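-			/* intersect the driver's static capability masks with the     */
-			/* capabilities the card reported in the ESC_PROFILE IE        */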
-			a->profile.Global_Options &= 0x000000ffL;
-			a->profile.B1_Protocols &= 0x000003ffL;
-			a->profile.B2_Protocols &= 0x00001fdfL;
-			a->profile.B3_Protocols &= 0x000000b7L;
-
-			a->profile.Global_Options &= GET_DWORD(&esc_profile[6]) |
-				GL_BCHANNEL_OPERATION_SUPPORTED;
-			a->profile.B1_Protocols &= GET_DWORD(&esc_profile[10]);
-			a->profile.B2_Protocols &= GET_DWORD(&esc_profile[14]);
-			a->profile.B3_Protocols &= GET_DWORD(&esc_profile[18]);
-			a->manufacturer_features = GET_DWORD(&esc_profile[46]);
-			a->man_profile.private_options = 0;
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_ECHO_CANCELLER)
-			{
-				a->man_profile.private_options |= 1L << PRIVATE_ECHO_CANCELLER;
-				a->profile.Global_Options |= GL_ECHO_CANCELLER_SUPPORTED;
-			}
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_RTP)
-				a->man_profile.private_options |= 1L << PRIVATE_RTP;
-			a->man_profile.rtp_primary_payloads = GET_DWORD(&esc_profile[50]);
-			a->man_profile.rtp_additional_payloads = GET_DWORD(&esc_profile[54]);
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_T38)
-				a->man_profile.private_options |= 1L << PRIVATE_T38;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD)
-				a->man_profile.private_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_V18)
-				a->man_profile.private_options |= 1L << PRIVATE_V18;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_TONE)
-				a->man_profile.private_options |= 1L << PRIVATE_DTMF_TONE;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_PIAFS)
-				a->man_profile.private_options |= 1L << PRIVATE_PIAFS;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-				a->man_profile.private_options |= 1L << PRIVATE_FAX_PAPER_FORMATS;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_VOWN)
-				a->man_profile.private_options |= 1L << PRIVATE_VOWN;
-
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_NONSTANDARD)
-				a->man_profile.private_options |= 1L << PRIVATE_FAX_NONSTANDARD;
-
-		}
-		else
-		{
-			a->profile.Global_Options &= 0x0000007fL;
-			a->profile.B1_Protocols &= 0x000003dfL;
-			a->profile.B2_Protocols &= 0x00001adfL;
-			a->profile.B3_Protocols &= 0x000000b7L;
-			a->manufacturer_features &= MANUFACTURER_FEATURE_HARDDTMF;
-		}
-		if (a->manufacturer_features & (MANUFACTURER_FEATURE_HARDDTMF |
-						MANUFACTURER_FEATURE_SOFTDTMF_SEND | MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
-		{
-			a->profile.Global_Options |= GL_DTMF_SUPPORTED;
-		}
-		a->manufacturer_features &= ~MANUFACTURER_FEATURE_OOB_CHANNEL;
-		dbug(1, dprintf("[%06x] Profile: %lx %lx %lx %lx %lx",
-				UnMapController(a->Id), a->profile.Global_Options,
-				a->profile.B1_Protocols, a->profile.B2_Protocols,
-				a->profile.B3_Protocols, a->manufacturer_features));
-	}
-	/* codec plci for the handset/hook state support is just an internal id  */
-	if (plci != a->AdvCodecPLCI)
-	{
-		force_mt_info = SendMultiIE(plci, Id, multi_fac_parms, FTY, 0x20, 0);
-		force_mt_info |= SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, 0);
-		SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
-		SendInfo(plci, Id, parms, force_mt_info);
-
-		VSwitchReqInd(plci, Id, multi_vswitch_parms);
-
-	}
-
-	/* switch the codec to the b-channel                                     */
-	if (esc_chi[0] && plci && !plci->SuppState) {
-		plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
-		mixer_set_bchannel_id_esc(plci, plci->b_channel);
-		dbug(1, dprintf("storeChannel=0x%x", plci->b_channel));
-		if (plci->tel == ADV_VOICE && plci->appl) {
-			SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
-		}
-	}
-
-	if (plci->appl) plci->appl->Number++;
-
-	switch (plci->Sig.Ind) {
-		/* Response to Get_Supported_Services request */
-	case S_SUPPORTED:
-		dbug(1, dprintf("S_Supported"));
-		if (!plci->appl) break;
-		if (pty_cai[0] == 4)
-		{
-			PUT_DWORD(&CF_Ind[6], GET_DWORD(&pty_cai[1]));
-		}
-		else
-		{
-			PUT_DWORD(&CF_Ind[6], MASK_TERMINAL_PORTABILITY | MASK_HOLD_RETRIEVE);
-		}
-		PUT_WORD(&CF_Ind[1], 0);
-		PUT_WORD(&CF_Ind[4], 0);
-		sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7, plci->number, "wws", 0, 3, CF_Ind);
-		plci_remove(plci);
-		break;
-
-		/* Supplementary Service rejected */
-	case S_SERVICE_REJ:
-		dbug(1, dprintf("S_Reject=0x%x", pty_cai[5]));
-		if (!pty_cai[0]) break;
-		switch (pty_cai[5])
-		{
-		case ECT_EXECUTE:
-		case THREE_PTY_END:
-		case THREE_PTY_BEGIN:
-			if (!plci->relatedPTYPLCI) break;
-			tplci = plci->relatedPTYPLCI;
-			rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
-			if (tplci->tel) rId |= EXT_CONTROLLER;
-			if (pty_cai[5] == ECT_EXECUTE)
-			{
-				PUT_WORD(&SS_Ind[1], S_ECT);
-
-				plci->vswitchstate = 0;
-				plci->relatedPTYPLCI->vswitchstate = 0;
-
-			}
-			else
-			{
-				PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
-			}
-			if (pty_cai[2] != 0xff)
-			{
-				PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
-			}
-			else
-			{
-				PUT_WORD(&SS_Ind[4], 0x300E);
-			}
-			plci->relatedPTYPLCI = NULL;
-			plci->ptyState = 0;
-			sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
-			break;
-
-		case CALL_DEFLECTION:
-			if (pty_cai[2] != 0xff)
-			{
-				PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
-			}
-			else
-			{
-				PUT_WORD(&SS_Ind[4], 0x300E);
-			}
-			PUT_WORD(&SS_Ind[1], pty_cai[5]);
-			for (i = 0; i < max_appl; i++)
-			{
-				if (application[i].CDEnable)
-				{
-					if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-					application[i].CDEnable = false;
-				}
-			}
-			break;
-
-		case DEACTIVATION_DIVERSION:
-		case ACTIVATION_DIVERSION:
-		case DIVERSION_INTERROGATE_CFU:
-		case DIVERSION_INTERROGATE_CFB:
-		case DIVERSION_INTERROGATE_CFNR:
-		case DIVERSION_INTERROGATE_NUM:
-		case CCBS_REQUEST:
-		case CCBS_DEACTIVATE:
-		case CCBS_INTERROGATE:
-			if (!plci->appl) break;
-			if (pty_cai[2] != 0xff)
-			{
-				PUT_WORD(&Interr_Err_Ind[4], 0x3600 | (word)pty_cai[2]);
-			}
-			else
-			{
-				PUT_WORD(&Interr_Err_Ind[4], 0x300E);
-			}
-			switch (pty_cai[5])
-			{
-			case DEACTIVATION_DIVERSION:
-				dbug(1, dprintf("Deact_Div"));
-				Interr_Err_Ind[0] = 0x9;
-				Interr_Err_Ind[3] = 0x6;
-				PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_STOP);
-				break;
-			case ACTIVATION_DIVERSION:
-				dbug(1, dprintf("Act_Div"));
-				Interr_Err_Ind[0] = 0x9;
-				Interr_Err_Ind[3] = 0x6;
-				PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_START);
-				break;
-			case DIVERSION_INTERROGATE_CFU:
-			case DIVERSION_INTERROGATE_CFB:
-			case DIVERSION_INTERROGATE_CFNR:
-				dbug(1, dprintf("Interr_Div"));
-				Interr_Err_Ind[0] = 0xa;
-				Interr_Err_Ind[3] = 0x7;
-				PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_DIVERSION);
-				break;
-			case DIVERSION_INTERROGATE_NUM:
-				dbug(1, dprintf("Interr_Num"));
-				Interr_Err_Ind[0] = 0xa;
-				Interr_Err_Ind[3] = 0x7;
-				PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_NUMBERS);
-				break;
-			case CCBS_REQUEST:
-				dbug(1, dprintf("CCBS Request"));
-				Interr_Err_Ind[0] = 0xd;
-				Interr_Err_Ind[3] = 0xa;
-				PUT_WORD(&Interr_Err_Ind[1], S_CCBS_REQUEST);
-				break;
-			case CCBS_DEACTIVATE:
-				dbug(1, dprintf("CCBS Deactivate"));
-				Interr_Err_Ind[0] = 0x9;
-				Interr_Err_Ind[3] = 0x6;
-				PUT_WORD(&Interr_Err_Ind[1], S_CCBS_DEACTIVATE);
-				break;
-			case CCBS_INTERROGATE:
-				dbug(1, dprintf("CCBS Interrogate"));
-				Interr_Err_Ind[0] = 0xb;
-				Interr_Err_Ind[3] = 0x8;
-				PUT_WORD(&Interr_Err_Ind[1], S_CCBS_INTERROGATE);
-				break;
-			}
-			PUT_DWORD(&Interr_Err_Ind[6], plci->appl->S_Handle);
-			sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, Interr_Err_Ind);
-			plci_remove(plci);
-			break;
-		case ACTIVATION_MWI:
-		case DEACTIVATION_MWI:
-			if (pty_cai[5] == ACTIVATION_MWI)
-			{
-				PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
-			}
-			else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
-
-			if (pty_cai[2] != 0xff)
-			{
-				PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
-			}
-			else
-			{
-				PUT_WORD(&SS_Ind[4], 0x300E);
-			}
-
-			if (plci->cr_enquiry)
-			{
-				sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
-				plci_remove(plci);
-			}
-			else
-			{
-				sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-			}
-			break;
-		case CONF_ADD: /* ERROR */
-		case CONF_BEGIN:
-		case CONF_DROP:
-		case CONF_ISOLATE:
-		case CONF_REATTACH:
-			CONF_Ind[0] = 9;
-			CONF_Ind[3] = 6;
-			switch (pty_cai[5])
-			{
-			case CONF_BEGIN:
-				PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
-				plci->ptyState = 0;
-				break;
-			case CONF_DROP:
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
-				plci->ptyState = CONNECTED;
-				break;
-			case CONF_ISOLATE:
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
-				plci->ptyState = CONNECTED;
-				break;
-			case CONF_REATTACH:
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
-				plci->ptyState = CONNECTED;
-				break;
-			case CONF_ADD:
-				PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
-				plci->relatedPTYPLCI = NULL;
-				tplci = plci->relatedPTYPLCI;
-				if (tplci) tplci->ptyState = CONNECTED;
-				plci->ptyState = CONNECTED;
-				break;
-			}
-
-			if (pty_cai[2] != 0xff)
-			{
-				PUT_WORD(&CONF_Ind[4], 0x3600 | (word)pty_cai[2]);
-			}
-			else
-			{
-				PUT_WORD(&CONF_Ind[4], 0x3303); /* Time-out: network did not respond
-								  within the required time */
-			}
-
-			PUT_DWORD(&CONF_Ind[6], 0x0);
-			sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
-			break;
-		}
-		break;
-
-		/* Supplementary Service indicates success */
-	case S_SERVICE:
-		dbug(1, dprintf("Service_Ind"));
-		PUT_WORD(&CF_Ind[4], 0);
-		switch (pty_cai[5])
-		{
-		case THREE_PTY_END:
-		case THREE_PTY_BEGIN:
-		case ECT_EXECUTE:
-			if (!plci->relatedPTYPLCI) break;
-			tplci = plci->relatedPTYPLCI;
-			rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
-			if (tplci->tel) rId |= EXT_CONTROLLER;
-			if (pty_cai[5] == ECT_EXECUTE)
-			{
-				PUT_WORD(&SS_Ind[1], S_ECT);
-
-				if (plci->vswitchstate != 3)
-				{
-
-					plci->ptyState = IDLE;
-					plci->relatedPTYPLCI = NULL;
-					plci->ptyState = 0;
-
-				}
-
-				dbug(1, dprintf("ECT OK"));
-				sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
-
-
-
-			}
-			else
-			{
-				switch (plci->ptyState)
-				{
-				case S_3PTY_BEGIN:
-					plci->ptyState = CONNECTED;
-					dbug(1, dprintf("3PTY ON"));
-					break;
-
-				case S_3PTY_END:
-					plci->ptyState = IDLE;
-					plci->relatedPTYPLCI = NULL;
-					plci->ptyState = 0;
-					dbug(1, dprintf("3PTY OFF"));
-					break;
-				}
-				PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
-				sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
-			}
-			break;
-
-		case CALL_DEFLECTION:
-			PUT_WORD(&SS_Ind[1], pty_cai[5]);
-			for (i = 0; i < max_appl; i++)
-			{
-				if (application[i].CDEnable)
-				{
-					if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-					application[i].CDEnable = false;
-				}
-			}
-			break;
-
-		case DEACTIVATION_DIVERSION:
-		case ACTIVATION_DIVERSION:
-			if (!plci->appl) break;
-			PUT_WORD(&CF_Ind[1], pty_cai[5] + 2);
-			PUT_DWORD(&CF_Ind[6], plci->appl->S_Handle);
-			sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, CF_Ind);
-			plci_remove(plci);
-			break;
-
-		case DIVERSION_INTERROGATE_CFU:
-		case DIVERSION_INTERROGATE_CFB:
-		case DIVERSION_INTERROGATE_CFNR:
-		case DIVERSION_INTERROGATE_NUM:
-		case CCBS_REQUEST:
-		case CCBS_DEACTIVATE:
-		case CCBS_INTERROGATE:
-			if (!plci->appl) break;
-			switch (pty_cai[5])
-			{
-			case DIVERSION_INTERROGATE_CFU:
-			case DIVERSION_INTERROGATE_CFB:
-			case DIVERSION_INTERROGATE_CFNR:
-				dbug(1, dprintf("Interr_Div"));
-				PUT_WORD(&pty_cai[1], S_INTERROGATE_DIVERSION);
-				pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
-				break;
-			case DIVERSION_INTERROGATE_NUM:
-				dbug(1, dprintf("Interr_Num"));
-				PUT_WORD(&pty_cai[1], S_INTERROGATE_NUMBERS);
-				pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
-				break;
-			case CCBS_REQUEST:
-				dbug(1, dprintf("CCBS Request"));
-				PUT_WORD(&pty_cai[1], S_CCBS_REQUEST);
-				pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
-				break;
-			case CCBS_DEACTIVATE:
-				dbug(1, dprintf("CCBS Deactivate"));
-				PUT_WORD(&pty_cai[1], S_CCBS_DEACTIVATE);
-				pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
-				break;
-			case CCBS_INTERROGATE:
-				dbug(1, dprintf("CCBS Interrogate"));
-				PUT_WORD(&pty_cai[1], S_CCBS_INTERROGATE);
-				pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
-				break;
-			}
-			PUT_WORD(&pty_cai[4], 0); /* Supplementary Service Reason */
-			PUT_DWORD(&pty_cai[6], plci->appl->S_Handle);
-			sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "wS", 3, pty_cai);
-			plci_remove(plci);
-			break;
-
-		case ACTIVATION_MWI:
-		case DEACTIVATION_MWI:
-			if (pty_cai[5] == ACTIVATION_MWI)
-			{
-				PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
-			}
-			else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
-			if (plci->cr_enquiry)
-			{
-				sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
-				plci_remove(plci);
-			}
-			else
-			{
-				sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-			}
-			break;
-		case MWI_INDICATION:
-			if (pty_cai[0] >= 0x12)
-			{
-				PUT_WORD(&pty_cai[3], S_MWI_INDICATE);
-				pty_cai[2] = pty_cai[0] - 2; /* len Parameter */
-				pty_cai[5] = pty_cai[0] - 5; /* Supplementary Service-specific parameter len */
-				if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_MWI))
-				{
-					if (plci->internal_command == GET_MWI_STATE) /* result on Message Waiting Listen */
-					{
-						sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "wS", 3, &pty_cai[2]);
-						plci_remove(plci);
-						return;
-					}
-					else sendf(plci->appl, _FACILITY_I, Id, 0, "wS", 3, &pty_cai[2]);
-					pty_cai[0] = 0;
-				}
-				else
-				{
-					for (i = 0; i < max_appl; i++)
-					{
-						if (a->Notification_Mask[i]&SMASK_MWI)
-						{
-							sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "wS", 3, &pty_cai[2]);
-							pty_cai[0] = 0;
-						}
-					}
-				}
-
-				if (!pty_cai[0])
-				{ /* acknowledge */
-					facility[2] = 0; /* returncode */
-				}
-				else facility[2] = 0xff;
-			}
-			else
-			{
-				/* reject */
-				facility[2] = 0xff; /* returncode */
-			}
-			facility[0] = 2;
-			facility[1] = MWI_RESPONSE; /* Function */
-			add_p(plci, CAI, facility);
-			add_p(plci, ESC, multi_ssext_parms[0]); /* remembered parameter -> only one possible */
-			sig_req(plci, S_SERVICE, 0);
-			send_req(plci);
-			plci->command = 0;
-			next_internal_command(Id, plci);
-			break;
-		case CONF_ADD: /* OK */
-		case CONF_BEGIN:
-		case CONF_DROP:
-		case CONF_ISOLATE:
-		case CONF_REATTACH:
-		case CONF_PARTYDISC:
-			CONF_Ind[0] = 9;
-			CONF_Ind[3] = 6;
-			switch (pty_cai[5])
-			{
-			case CONF_BEGIN:
-				PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
-				if (pty_cai[0] == 6)
-				{
-					d = pty_cai[6];
-					PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
-				}
-				else
-				{
-					PUT_DWORD(&CONF_Ind[6], 0x0);
-				}
-				break;
-			case CONF_ISOLATE:
-				PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				break;
-			case CONF_REATTACH:
-				PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				break;
-			case CONF_DROP:
-				PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
-				CONF_Ind[0] = 5;
-				CONF_Ind[3] = 2;
-				break;
-			case CONF_ADD:
-				PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
-				d = pty_cai[6];
-				PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
-				tplci = plci->relatedPTYPLCI;
-				if (tplci) tplci->ptyState = CONNECTED;
-				break;
-			case CONF_PARTYDISC:
-				CONF_Ind[0] = 7;
-				CONF_Ind[3] = 4;
-				PUT_WORD(&CONF_Ind[1], S_CONF_PARTYDISC);
-				d = pty_cai[6];
-				PUT_DWORD(&CONF_Ind[4], d); /* PartyID */
-				break;
-			}
-			plci->ptyState = CONNECTED;
-			sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
-			break;
-		case CCBS_INFO_RETAIN:
-		case CCBS_ERASECALLLINKAGEID:
-		case CCBS_STOP_ALERTING:
-			CONF_Ind[0] = 5;
-			CONF_Ind[3] = 2;
-			switch (pty_cai[5])
-			{
-			case CCBS_INFO_RETAIN:
-				PUT_WORD(&CONF_Ind[1], S_CCBS_INFO_RETAIN);
-				break;
-			case CCBS_STOP_ALERTING:
-				PUT_WORD(&CONF_Ind[1], S_CCBS_STOP_ALERTING);
-				break;
-			case CCBS_ERASECALLLINKAGEID:
-				PUT_WORD(&CONF_Ind[1], S_CCBS_ERASECALLLINKAGEID);
-				CONF_Ind[0] = 7;
-				CONF_Ind[3] = 4;
-				CONF_Ind[6] = 0;
-				CONF_Ind[7] = 0;
-				break;
-			}
-			w = pty_cai[6];
-			PUT_WORD(&CONF_Ind[4], w); /* PartyID */
-
-			if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_CCBS))
-			{
-				sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
-			}
-			else
-			{
-				for (i = 0; i < max_appl; i++)
-					if (a->Notification_Mask[i] & SMASK_CCBS)
-						sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "ws", 3, CONF_Ind);
-			}
-			break;
-		}
-		break;
-	case CALL_HOLD_REJ:
-		cau = parms[7];
-		if (cau)
-		{
-			i = _L3_CAUSE | cau[2];
-			if (cau[2] == 0) i = 0x3603;
-		}
-		else
-		{
-			i = 0x3603;
-		}
-		PUT_WORD(&SS_Ind[1], S_HOLD);
-		PUT_WORD(&SS_Ind[4], i);
-		if (plci->SuppState == HOLD_REQUEST)
-		{
-			plci->SuppState = IDLE;
-			sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-		}
-		break;
-
-	case CALL_HOLD_ACK:
-		if (plci->SuppState == HOLD_REQUEST)
-		{
-			plci->SuppState = CALL_HELD;
-			CodecIdCheck(a, plci);
-			start_internal_command(Id, plci, hold_save_command);
-		}
-		break;
-
-	case CALL_RETRIEVE_REJ:
-		cau = parms[7];
-		if (cau)
-		{
-			i = _L3_CAUSE | cau[2];
-			if (cau[2] == 0) i = 0x3603;
-		}
-		else
-		{
-			i = 0x3603;
-		}
-		PUT_WORD(&SS_Ind[1], S_RETRIEVE);
-		PUT_WORD(&SS_Ind[4], i);
-		if (plci->SuppState == RETRIEVE_REQUEST)
-		{
-			plci->SuppState = CALL_HELD;
-			CodecIdCheck(a, plci);
-			sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-		}
-		break;
-
-	case CALL_RETRIEVE_ACK:
-		PUT_WORD(&SS_Ind[1], S_RETRIEVE);
-		if (plci->SuppState == RETRIEVE_REQUEST)
-		{
-			plci->SuppState = IDLE;
-			plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
-			plci->b_channel = esc_chi[esc_chi[0]]&0x1f;
-			if (plci->tel)
-			{
-				mixer_set_bchannel_id_esc(plci, plci->b_channel);
-				dbug(1, dprintf("RetrChannel=0x%x", plci->b_channel));
-				SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
-				if (plci->B2_prot == B2_TRANSPARENT && plci->B3_prot == B3_TRANSPARENT)
-				{
-					dbug(1, dprintf("Get B-ch"));
-					start_internal_command(Id, plci, retrieve_restore_command);
-				}
-				else
-					sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
-			}
-			else
-				start_internal_command(Id, plci, retrieve_restore_command);
-		}
-		break;
-
-	case INDICATE_IND:
-		if (plci->State != LISTENING) {
-			sig_req(plci, HANGUP, 0);
-			send_req(plci);
-			break;
-		}
-		cip = find_cip(a, parms[4], parms[6]);
-		cip_mask = 1L << cip;
-		dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask));
-		bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
-		if (!remove_started && !a->adapter_disabled)
-		{
-			group_optimization(a, plci);
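-			/* offer the incoming call to each application selected by     */
-			/* group optimization whose CIP mask (or its "any call" bit 0) */
-			/* matches and whose CPN filter accepts the called number      */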
-			for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) {
-				if (application[i].Id
-				    && (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask)
-				    && CPN_filter_ok(parms[0], a, i)) {
-					dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i]));
-					__set_bit(i, plci->c_ind_mask_table);
-					dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
-					plci->State = INC_CON_PENDING;
-					plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) |
-						CALL_DIR_IN | CALL_DIR_ANSWER;
-					if (esc_chi[0]) {
-						plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
-						mixer_set_bchannel_id_esc(plci, plci->b_channel);
-					}
-					/* if a listen on the ext controller is done, check if hook states */
-					/* are supported or if just an on-board codec must be activated    */
-					if (a->codec_listen[i] && !a->AdvSignalPLCI) {
-						if (a->profile.Global_Options & HANDSET)
-							plci->tel = ADV_VOICE;
-						else if (a->profile.Global_Options & ON_BOARD_CODEC)
-							plci->tel = CODEC;
-						if (plci->tel) Id |= EXT_CONTROLLER;
-						a->codec_listen[i] = plci;
-					}
-
-					sendf(&application[i], _CONNECT_I, Id, 0,
-					      "wSSSSSSSbSSSSS", cip,    /* CIP                 */
-					      parms[0],    /* CalledPartyNumber   */
-					      multi_CiPN_parms[0],    /* CallingPartyNumber  */
-					      parms[2],    /* CalledPartySubad    */
-					      parms[3],    /* CallingPartySubad   */
-					      parms[4],    /* BearerCapability    */
-					      parms[5],    /* LowLC               */
-					      parms[6],    /* HighLC              */
-					      ai_len,      /* nested struct add_i */
-					      add_i[0],    /* B channel info    */
-					      add_i[1],    /* keypad facility   */
-					      add_i[2],    /* user user data    */
-					      add_i[3],    /* nested facility   */
-					      multi_CiPN_parms[1]    /* second CiPN(SCR)   */
-						);
-					SendSSExtInd(&application[i],
-						     plci,
-						     Id,
-						     multi_ssext_parms);
-					SendSetupInfo(&application[i],
-						      plci,
-						      Id,
-						      parms,
-						      SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true));
-				}
-			}
-			dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
-		}
-		if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
-			sig_req(plci, HANGUP, 0);
-			send_req(plci);
-			plci->State = IDLE;
-		}
-		plci->notifiedcall = 0;
-		a->listen_active--;
-		listen_check(a);
-		break;
-
-	case CALL_PEND_NOTIFY:
-		plci->notifiedcall = 1;
-		listen_check(a);
-		break;
-
-	case CALL_IND:
-	case CALL_CON:
-		if (plci->State == ADVANCED_VOICE_SIG || plci->State == ADVANCED_VOICE_NOSIG)
-		{
-			if (plci->internal_command == PERM_COD_CONN_PEND)
-			{
-				if (plci->State == ADVANCED_VOICE_NOSIG)
-				{
-					dbug(1, dprintf("***Codec OK"));
-					if (a->AdvSignalPLCI)
-					{
-						tplci = a->AdvSignalPLCI;
-						if (tplci->spoofed_msg)
-						{
-							dbug(1, dprintf("***Spoofed Msg(0x%x)", tplci->spoofed_msg));
-							tplci->command = 0;
-							tplci->internal_command = 0;
-							x_Id = ((word)tplci->Id << 8) | tplci->adapter->Id | 0x80;
-							switch (tplci->spoofed_msg)
-							{
-							case CALL_RES:
-								tplci->command = _CONNECT_I | RESPONSE;
-								api_load_msg(&tplci->saved_msg, saved_parms);
-								add_b1(tplci, &saved_parms[1], 0, tplci->B1_facilities);
-								if (tplci->adapter->Info_Mask[tplci->appl->Id - 1] & 0x200)
-								{
-									/* early B3 connect (CIP mask bit 9) no release after a disc */
-									add_p(tplci, LLI, "\x01\x01");
-								}
-								add_s(tplci, CONN_NR, &saved_parms[2]);
-								add_s(tplci, LLC, &saved_parms[4]);
-								add_ai(tplci, &saved_parms[5]);
-								tplci->State = INC_CON_ACCEPT;
-								sig_req(tplci, CALL_RES, 0);
-								send_req(tplci);
-								break;
-
-							case AWAITING_SELECT_B:
-								dbug(1, dprintf("Select_B continue"));
-								start_internal_command(x_Id, tplci, select_b_command);
-								break;
-
-							case AWAITING_MANUF_CON: /* Get_Plci per Manufacturer_Req to ext controller */
-								if (!tplci->Sig.Id)
-								{
-									dbug(1, dprintf("No SigID!"));
-									sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
-									plci_remove(tplci);
-									break;
-								}
-								tplci->command = _MANUFACTURER_R;
-								api_load_msg(&tplci->saved_msg, saved_parms);
-								dir = saved_parms[2].info[0];
-								if (dir == 1) {
-									sig_req(tplci, CALL_REQ, 0);
-								}
-								else if (!dir) {
-									sig_req(tplci, LISTEN_REQ, 0);
-								}
-								send_req(tplci);
-								sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, 0);
-								break;
-
-							case (CALL_REQ | AWAITING_MANUF_CON):
-								sig_req(tplci, CALL_REQ, 0);
-								send_req(tplci);
-								break;
-
-							case CALL_REQ:
-								if (!tplci->Sig.Id)
-								{
-									dbug(1, dprintf("No SigID!"));
-									sendf(tplci->appl, _CONNECT_R | CONFIRM, tplci->adapter->Id, 0, "w", _OUT_OF_PLCI);
-									plci_remove(tplci);
-									break;
-								}
-								tplci->command = _CONNECT_R;
-								api_load_msg(&tplci->saved_msg, saved_parms);
-								add_s(tplci, CPN, &saved_parms[1]);
-								add_s(tplci, DSA, &saved_parms[3]);
-								add_ai(tplci, &saved_parms[9]);
-								sig_req(tplci, CALL_REQ, 0);
-								send_req(tplci);
-								break;
-
-							case CALL_RETRIEVE:
-								tplci->command = C_RETRIEVE_REQ;
-								sig_req(tplci, CALL_RETRIEVE, 0);
-								send_req(tplci);
-								break;
-							}
-							tplci->spoofed_msg = 0;
-							if (tplci->internal_command == 0)
-								next_internal_command(x_Id, tplci);
-						}
-					}
-					next_internal_command(Id, plci);
-					break;
-				}
-				dbug(1, dprintf("***Codec Hook Init Req"));
-				plci->internal_command = PERM_COD_HOOK;
-				add_p(plci, FTY, "\x01\x09");             /* Get Hook State*/
-				sig_req(plci, TEL_CTRL, 0);
-				send_req(plci);
-			}
-		}
-		else if (plci->command != _MANUFACTURER_R  /* old style permanent connect */
-			 && plci->State != INC_ACT_PENDING)
-		{
-			mixer_set_bchannel_id_esc(plci, plci->b_channel);
-			if (plci->tel == ADV_VOICE && plci->SuppState == IDLE) /* with permanent codec switch on immediately */
-			{
-				chi[2] = plci->b_channel;
-				SetVoiceChannel(a->AdvCodecPLCI, chi, a);
-			}
-			sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "Sss", parms[21], "", "");
-			plci->State = INC_ACT_PENDING;
-		}
-		break;
-
-	case TEL_CTRL:
-		ie = multi_fac_parms[0]; /* inspect the facility hook indications */
-		if (plci->State == ADVANCED_VOICE_SIG && ie[0]) {
-			switch (ie[1] & 0x91) {
-			case 0x80:   /* hook off */
-			case 0x81:
-				if (plci->internal_command == PERM_COD_HOOK)
-				{
-					dbug(1, dprintf("init:hook_off"));
-					plci->hook_state = ie[1];
-					next_internal_command(Id, plci);
-					break;
-				}
-				else /* ignore doubled hook indications */
-				{
-					if (((plci->hook_state) & 0xf0) == 0x80)
-					{
-						dbug(1, dprintf("ignore hook"));
-						break;
-					}
-					plci->hook_state = ie[1]&0x91;
-				}
-				/* check for incoming call pending */
-				/* and signal '+'. Appl must decide */
-				/* with connect_res if call must be */
-				/* accepted or not                  */
-				for (i = 0, tplci = NULL; i < max_appl; i++) {
-					if (a->codec_listen[i]
-					    && (a->codec_listen[i]->State == INC_CON_PENDING
-						|| a->codec_listen[i]->State == INC_CON_ALERT)) {
-						tplci = a->codec_listen[i];
-						tplci->appl = &application[i];
-					}
-				}
-				/* no incoming call, do outgoing call */
-				/* and signal '+' if outg. setup   */
-				if (!a->AdvSignalPLCI && !tplci) {
-					if ((i = get_plci(a))) {
-						a->AdvSignalPLCI = &a->plci[i - 1];
-						tplci = a->AdvSignalPLCI;
-						tplci->tel  = ADV_VOICE;
-						PUT_WORD(&voice_cai[5], a->AdvSignalAppl->MaxDataLength);
-						if (a->Info_Mask[a->AdvSignalAppl->Id - 1] & 0x200) {
-							/* early B3 connect (CIP mask bit 9) no release after a disc */
-							add_p(tplci, LLI, "\x01\x01");
-						}
-						add_p(tplci, CAI, voice_cai);
-						add_p(tplci, OAD, a->TelOAD);
-						add_p(tplci, OSA, a->TelOSA);
-						add_p(tplci, SHIFT | 6, NULL);
-						add_p(tplci, SIN, "\x02\x01\x00");
-						add_p(tplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						sig_req(tplci, ASSIGN, DSIG_ID);
-						a->AdvSignalPLCI->internal_command = HOOK_OFF_REQ;
-						a->AdvSignalPLCI->command = 0;
-						tplci->appl = a->AdvSignalAppl;
-						tplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-						send_req(tplci);
-					}
-
-				}
-
-				if (!tplci) break;
-				Id = ((word)tplci->Id << 8) | a->Id;
-				Id |= EXT_CONTROLLER;
-				sendf(tplci->appl,
-				      _FACILITY_I,
-				      Id,
-				      0,
-				      "ws", (word)0, "\x01+");
-				break;
-
-			case 0x90:   /* hook on  */
-			case 0x91:
-				if (plci->internal_command == PERM_COD_HOOK)
-				{
-					dbug(1, dprintf("init:hook_on"));
-					plci->hook_state = ie[1] & 0x91;
-					next_internal_command(Id, plci);
-					break;
-				}
-				else /* ignore doubled hook indications */
-				{
-					if (((plci->hook_state) & 0xf0) == 0x90) break;
-					plci->hook_state = ie[1] & 0x91;
-				}
-				/* hangup the adv. voice call and signal '-' to the appl */
-				if (a->AdvSignalPLCI) {
-					Id = ((word)a->AdvSignalPLCI->Id << 8) | a->Id;
-					if (plci->tel) Id |= EXT_CONTROLLER;
-					sendf(a->AdvSignalAppl,
-					      _FACILITY_I,
-					      Id,
-					      0,
-					      "ws", (word)0, "\x01-");
-					a->AdvSignalPLCI->internal_command = HOOK_ON_REQ;
-					a->AdvSignalPLCI->command = 0;
-					sig_req(a->AdvSignalPLCI, HANGUP, 0);
-					send_req(a->AdvSignalPLCI);
-				}
-				break;
-			}
-		}
-		break;
-
-	case RESUME:
-		__clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table);
-		PUT_WORD(&resume_cau[4], GOOD);
-		sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
-		break;
-
-	case SUSPEND:
-		bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
-
-		if (plci->NL.Id && !plci->nl_remove_id) {
-			mixer_remove(plci);
-			nl_req_ncci(plci, REMOVE, 0);
-		}
-		if (!plci->sig_remove_id) {
-			plci->internal_command = 0;
-			sig_req(plci, REMOVE, 0);
-		}
-		send_req(plci);
-		if (!plci->channels) {
-			sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, "\x05\x04\x00\x02\x00\x00");
-			sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
-		}
-		break;
-
-	case SUSPEND_REJ:
-		break;
-
-	case HANGUP:
-		plci->hangup_flow_ctrl_timer = 0;
-		if (plci->manufacturer && plci->State == LOCAL_CONNECT) break;
-		cau = parms[7];
-		if (cau) {
-			i = _L3_CAUSE | cau[2];
-			if (cau[2] == 0) i = 0;
-			else if (cau[2] == 8) i = _L1_ERROR;
-			else if (cau[2] == 9 || cau[2] == 10) i = _L2_ERROR;
-			else if (cau[2] == 5) i = _CAPI_GUARD_ERROR;
-		}
-		else {
-			i = _L3_ERROR;
-		}
-
-		if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
-		{
-			for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
-				sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
-		}
-		else
-		{
-			bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
-		}
-		if (!plci->appl)
-		{
-			if (plci->State == LISTENING)
-			{
-				plci->notifiedcall = 0;
-				a->listen_active--;
-			}
-			plci->State = INC_DIS_PENDING;
-			if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
-			{
-				plci->State = IDLE;
-				if (plci->NL.Id && !plci->nl_remove_id)
-				{
-					mixer_remove(plci);
-					nl_req_ncci(plci, REMOVE, 0);
-				}
-				if (!plci->sig_remove_id)
-				{
-					plci->internal_command = 0;
-					sig_req(plci, REMOVE, 0);
-				}
-				send_req(plci);
-			}
-		}
-		else
-		{
-			/* collision of DISCONNECT or CONNECT_RES with HANGUP can   */
-			/* result in a second HANGUP! Don't generate another        */
-			/* DISCONNECT                                               */
-			if (plci->State != IDLE && plci->State != INC_DIS_PENDING)
-			{
-				if (plci->State == RESUMING)
-				{
-					PUT_WORD(&resume_cau[4], i);
-					sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
-				}
-				plci->State = INC_DIS_PENDING;
-				sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", i);
-			}
-		}
-		break;
-
-	case SSEXT_IND:
-		SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
-		break;
-
-	case VSWITCH_REQ:
-		VSwitchReqInd(plci, Id, multi_vswitch_parms);
-		break;
-	case VSWITCH_IND:
-		if (plci->relatedPTYPLCI &&
-		    plci->vswitchstate == 3 &&
-		    plci->relatedPTYPLCI->vswitchstate == 3 &&
-		    parms[MAXPARMSIDS - 1][0])
-		{
-			add_p(plci->relatedPTYPLCI, SMSG, parms[MAXPARMSIDS - 1]);
-			sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
-			send_req(plci->relatedPTYPLCI);
-		}
-		else VSwitchReqInd(plci, Id, multi_vswitch_parms);
-		break;
-
-	}
-}
-
-
-static void SendSetupInfo(APPL *appl, PLCI *plci, dword Id, byte **parms, byte Info_Sent_Flag)
-{
-	word i;
-	byte *ie;
-	word Info_Number;
-	byte *Info_Element;
-	word Info_Mask = 0;
-
-	dbug(1, dprintf("SetupInfo"));
-
-	for (i = 0; i < MAXPARMSIDS; i++) {
-		ie = parms[i];
-		Info_Number = 0;
-		Info_Element = ie;
-		if (ie[0]) {
-			switch (i) {
-			case 0:
-				dbug(1, dprintf("CPN "));
-				Info_Number = 0x0070;
-				Info_Mask = 0x80;
-				Info_Sent_Flag = true;
-				break;
-			case 8:  /* display      */
-				dbug(1, dprintf("display(%d)", i));
-				Info_Number = 0x0028;
-				Info_Mask = 0x04;
-				Info_Sent_Flag = true;
-				break;
-			case 16: /* Channel Id */
-				dbug(1, dprintf("CHI"));
-				Info_Number = 0x0018;
-				Info_Mask = 0x100;
-				Info_Sent_Flag = true;
-				mixer_set_bchannel_id(plci, Info_Element);
-				break;
-			case 19: /* Redirected Number */
-				dbug(1, dprintf("RDN"));
-				Info_Number = 0x0074;
-				Info_Mask = 0x400;
-				Info_Sent_Flag = true;
-				break;
-			case 20: /* Redirected Number extended */
-				dbug(1, dprintf("RDX"));
-				Info_Number = 0x0073;
-				Info_Mask = 0x400;
-				Info_Sent_Flag = true;
-				break;
-			case 22: /* Redirecting Number */
-				dbug(1, dprintf("RIN"));
-				Info_Number = 0x0076;
-				Info_Mask = 0x400;
-				Info_Sent_Flag = true;
-				break;
-			default:
-				Info_Number = 0;
-				break;
-			}
-		}
-
-		if (i == MAXPARMSIDS - 2) { /* to indicate the message type "Setup" */
-			Info_Number = 0x8000 | 5;
-			Info_Mask = 0x10;
-			Info_Element = "";
-		}
-
-		if (Info_Sent_Flag && Info_Number) {
-			if (plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask) {
-				sendf(appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
-			}
-		}
-	}
-}
-
-
-static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
-{
-	word i;
-	word j;
-	word k;
-	byte *ie;
-	word Info_Number;
-	byte *Info_Element;
-	word Info_Mask = 0;
-	static byte charges[5] = {4, 0, 0, 0, 0};
-	static byte cause[] = {0x02, 0x80, 0x00};
-	APPL *appl;
-
-	dbug(1, dprintf("InfoParse "));
-
-	if (
-		!plci->appl
-		&& !plci->State
-		&& plci->Sig.Ind != NCR_FACILITY
-		)
-	{
-		dbug(1, dprintf("NoParse "));
-		return;
-	}
-	cause[2] = 0;
-	for (i = 0; i < MAXPARMSIDS; i++) {
-		ie = parms[i];
-		Info_Number = 0;
-		Info_Element = ie;
-		if (ie[0]) {
-			switch (i) {
-			case 0:
-				dbug(1, dprintf("CPN "));
-				Info_Number = 0x0070;
-				Info_Mask   = 0x80;
-				break;
-			case 7: /* ESC_CAU */
-				dbug(1, dprintf("cau(0x%x)", ie[2]));
-				Info_Number = 0x0008;
-				Info_Mask = 0x00;
-				cause[2] = ie[2];
-				Info_Element = NULL;
-				break;
-			case 8:  /* display      */
-				dbug(1, dprintf("display(%d)", i));
-				Info_Number = 0x0028;
-				Info_Mask = 0x04;
-				break;
-			case 9:  /* Date display */
-				dbug(1, dprintf("date(%d)", i));
-				Info_Number = 0x0029;
-				Info_Mask = 0x02;
-				break;
-			case 10: /* charges */
-				for (j = 0; j < 4; j++) charges[1 + j] = 0;
-				for (j = 0; j < ie[0] && !(ie[1 + j] & 0x80); j++);
-				for (k = 1, j++; j < ie[0] && k <= 4; j++, k++) charges[k] = ie[1 + j];
-				Info_Number = 0x4000;
-				Info_Mask = 0x40;
-				Info_Element = charges;
-				break;
-			case 11: /* user user info */
-				dbug(1, dprintf("uui"));
-				Info_Number = 0x007E;
-				Info_Mask = 0x08;
-				break;
-			case 12: /* congestion receiver ready */
-				dbug(1, dprintf("clRDY"));
-				Info_Number = 0x00B0;
-				Info_Mask = 0x08;
-				Info_Element = "";
-				break;
-			case 13: /* congestion receiver not ready */
-				dbug(1, dprintf("clNRDY"));
-				Info_Number = 0x00BF;
-				Info_Mask = 0x08;
-				Info_Element = "";
-				break;
-			case 15: /* Keypad Facility */
-				dbug(1, dprintf("KEY"));
-				Info_Number = 0x002C;
-				Info_Mask = 0x20;
-				break;
-			case 16: /* Channel Id */
-				dbug(1, dprintf("CHI"));
-				Info_Number = 0x0018;
-				Info_Mask = 0x100;
-				mixer_set_bchannel_id(plci, Info_Element);
-				break;
-			case 17: /* if no 1tr6 cause, send full cause, else esc_cause */
-				dbug(1, dprintf("q9cau(0x%x)", ie[2]));
-				if (!cause[2] || cause[2] < 0x80) break;  /* eg. layer 1 error */
-				Info_Number = 0x0008;
-				Info_Mask = 0x01;
-				if (cause[2] != ie[2]) Info_Element = cause;
-				break;
-			case 19: /* Redirected Number */
-				dbug(1, dprintf("RDN"));
-				Info_Number = 0x0074;
-				Info_Mask = 0x400;
-				break;
-			case 22: /* Redirecting Number */
-				dbug(1, dprintf("RIN"));
-				Info_Number = 0x0076;
-				Info_Mask = 0x400;
-				break;
-			case 23: /* Notification Indicator  */
-				dbug(1, dprintf("NI"));
-				Info_Number = (word)NI;
-				Info_Mask = 0x210;
-				break;
-			case 26: /* Call State  */
-				dbug(1, dprintf("CST"));
-				Info_Number = (word)CST;
-				Info_Mask = 0x01; /* do with cause i.e. for now */
-				break;
-			case MAXPARMSIDS - 2:  /* Escape Message Type, must be the last indication */
-				dbug(1, dprintf("ESC/MT[0x%x]", ie[3]));
-				Info_Number = 0x8000 | ie[3];
-				if (iesent) Info_Mask = 0xffff;
-				else  Info_Mask = 0x10;
-				Info_Element = "";
-				break;
-			default:
-				Info_Number  = 0;
-				Info_Mask    = 0;
-				Info_Element = "";
-				break;
-			}
-		}
-
-		if (plci->Sig.Ind == NCR_FACILITY)           /* check controller broadcast */
-		{
-			for (j = 0; j < max_appl; j++)
-			{
-				appl = &application[j];
-				if (Info_Number
-				    && appl->Id
-				    && plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
-				{
-					dbug(1, dprintf("NCR_Ind"));
-					iesent = true;
-					sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
-				}
-			}
-		}
-		else if (!plci->appl)
-		{ /* overlap receiving broadcast */
-			if (Info_Number == CPN
-			    || Info_Number == KEY
-			    || Info_Number == NI
-			    || Info_Number == DSP
-			    || Info_Number == UUI)
-			{
-				for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
-					dbug(1, dprintf("Ovl_Ind"));
-					iesent = true;
-					sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
-				}
-			}
-		}               /* all other signalling states */
-		else if (Info_Number
-			 && plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
-		{
-			dbug(1, dprintf("Std_Ind"));
-			iesent = true;
-			sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
-		}
-	}
-}
-
-
-static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type,
-			dword info_mask, byte setupParse)
-{
-	word i;
-	word j;
-	byte *ie;
-	word Info_Number;
-	byte *Info_Element;
-	APPL *appl;
-	word Info_Mask = 0;
-	byte iesent = 0;
-
-	if (
-		!plci->appl
-		&& !plci->State
-		&& plci->Sig.Ind != NCR_FACILITY
-		&& !setupParse
-		)
-	{
-		dbug(1, dprintf("NoM-IEParse "));
-		return 0;
-	}
-	dbug(1, dprintf("M-IEParse "));
-
-	for (i = 0; i < MAX_MULTI_IE; i++)
-	{
-		ie = parms[i];
-		Info_Number = 0;
-		Info_Element = ie;
-		if (ie[0])
-		{
-			dbug(1, dprintf("[Ind0x%x]:IE=0x%x", plci->Sig.Ind, ie_type));
-			Info_Number = (word)ie_type;
-			Info_Mask = (word)info_mask;
-		}
-
-		if (plci->Sig.Ind == NCR_FACILITY)           /* check controller broadcast */
-		{
-			for (j = 0; j < max_appl; j++)
-			{
-				appl = &application[j];
-				if (Info_Number
-				    && appl->Id
-				    && plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
-				{
-					iesent = true;
-					dbug(1, dprintf("Mlt_NCR_Ind"));
-					sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
-				}
-			}
-		}
-		else if (!plci->appl && Info_Number)
-		{                                        /* overlap receiving broadcast */
-			for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
-				iesent = true;
-				dbug(1, dprintf("Mlt_Ovl_Ind"));
-				sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
-			}
-		}                                        /* all other signalling states */
-		else if (Info_Number
-			 && plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
-		{
-			iesent = true;
-			dbug(1, dprintf("Mlt_Std_Ind"));
-			sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
-		}
-	}
-	return iesent;
-}
-
-static void SendSSExtInd(APPL *appl, PLCI *plci, dword Id, byte **parms)
-{
-	word i;
-	/* Format of multi_ssext_parms[i][]:
-	   0 byte length
-	   1 byte SSEXTIE
-	   2 byte SSEXT_REQ/SSEXT_IND
-	   3 byte length
-	   4 word SSExtCommand
-	   6... Params
-	*/
-	if (
-		plci
-		&& plci->State
-		&& plci->Sig.Ind != NCR_FACILITY
-		)
-		for (i = 0; i < MAX_MULTI_IE; i++)
-		{
-			if (parms[i][0] < 6) continue;
-			if (parms[i][2] == SSEXT_REQ) continue;
-
-			if (appl)
-			{
-				parms[i][0] = 0; /* kill it */
-				sendf(appl, _MANUFACTURER_I,
-				      Id,
-				      0,
-				      "dwS",
-				      _DI_MANU_ID,
-				      _DI_SSEXT_CTRL,
-				      &parms[i][3]);
-			}
-			else if (plci->appl)
-			{
-				parms[i][0] = 0; /* kill it */
-				sendf(plci->appl, _MANUFACTURER_I,
-				      Id,
-				      0,
-				      "dwS",
-				      _DI_MANU_ID,
-				      _DI_SSEXT_CTRL,
-				      &parms[i][3]);
-			}
-		}
-};
-
-static void nl_ind(PLCI *plci)
-{
-	byte ch;
-	word ncci;
-	dword Id;
-	DIVA_CAPI_ADAPTER *a;
-	word NCCIcode;
-	APPL *APPLptr;
-	word count;
-	word Num;
-	word i, ncpi_state;
-	byte len, ncci_state;
-	word msg;
-	word info = 0;
-	word fax_feature_bits;
-	byte fax_send_edata_ack;
-	static byte v120_header_buffer[2 + 3];
-	static word fax_info[] = {
-		0,                     /* T30_SUCCESS                        */
-		_FAX_NO_CONNECTION,    /* T30_ERR_NO_DIS_RECEIVED            */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_TIMEOUT_NO_RESPONSE        */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_RESPONSE          */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_TOO_MANY_REPEATS           */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_UNEXPECTED_MESSAGE         */
-		_FAX_REMOTE_ABORT,     /* T30_ERR_UNEXPECTED_DCN             */
-		_FAX_LOCAL_ABORT,      /* T30_ERR_DTC_UNSUPPORTED            */
-		_FAX_TRAINING_ERROR,   /* T30_ERR_ALL_RATES_FAILED           */
-		_FAX_TRAINING_ERROR,   /* T30_ERR_TOO_MANY_TRAINS            */
-		_FAX_PARAMETER_ERROR,  /* T30_ERR_RECEIVE_CORRUPTED          */
-		_FAX_REMOTE_ABORT,     /* T30_ERR_UNEXPECTED_DISC            */
-		_FAX_LOCAL_ABORT,      /* T30_ERR_APPLICATION_DISC           */
-		_FAX_REMOTE_REJECT,    /* T30_ERR_INCOMPATIBLE_DIS           */
-		_FAX_LOCAL_ABORT,      /* T30_ERR_INCOMPATIBLE_DCS           */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_TIMEOUT_NO_COMMAND         */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_COMMAND           */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_TIMEOUT_COMMAND_TOO_LONG   */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_TIMEOUT_RESPONSE_TOO_LONG  */
-		_FAX_NO_CONNECTION,    /* T30_ERR_NOT_IDENTIFIED             */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_SUPERVISORY_TIMEOUT        */
-		_FAX_PARAMETER_ERROR,  /* T30_ERR_TOO_LONG_SCAN_LINE         */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_PAGE_AFTER_MPS    */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_PAGE_AFTER_CFR    */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_DCS_AFTER_FTT     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_DCS_AFTER_EOM     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_DCS_AFTER_MPS     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_DCN_AFTER_MCF     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_DCN_AFTER_RTN     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_CFR               */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_MCF_AFTER_EOP     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_MCF_AFTER_EOM     */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_RETRY_NO_MCF_AFTER_MPS     */
-		0x331d,                /* T30_ERR_SUB_SEP_UNSUPPORTED        */
-		0x331e,                /* T30_ERR_PWD_UNSUPPORTED            */
-		0x331f,                /* T30_ERR_SUB_SEP_PWD_UNSUPPORTED    */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_INVALID_COMMAND_FRAME      */
-		_FAX_PARAMETER_ERROR,  /* T30_ERR_UNSUPPORTED_PAGE_CODING    */
-		_FAX_PARAMETER_ERROR,  /* T30_ERR_INVALID_PAGE_CODING        */
-		_FAX_REMOTE_REJECT,    /* T30_ERR_INCOMPATIBLE_PAGE_CONFIG   */
-		_FAX_LOCAL_ABORT,      /* T30_ERR_TIMEOUT_FROM_APPLICATION   */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_V34FAX_NO_REACTION_ON_MARK */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_V34FAX_TRAINING_TIMEOUT    */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_V34FAX_UNEXPECTED_V21      */
-		_FAX_PROTOCOL_ERROR,   /* T30_ERR_V34FAX_PRIMARY_CTS_ON      */
-		_FAX_LOCAL_ABORT,      /* T30_ERR_V34FAX_TURNAROUND_POLLING  */
-		_FAX_LOCAL_ABORT       /* T30_ERR_V34FAX_V8_INCOMPATIBILITY  */
-	};
-
-	byte dtmf_code_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE + 1];
-
-
-	static word rtp_info[] = {
-		GOOD,                  /* RTP_SUCCESS                       */
-		0x3600                 /* RTP_ERR_SSRC_OR_PAYLOAD_CHANGE    */
-	};
-
-	static dword udata_forwarding_table[0x100 / sizeof(dword)] =
-		{
-			0x0020301e, 0x00000000, 0x00000000, 0x00000000,
-			0x00000000, 0x00000000, 0x00000000, 0x00000000
-		};
-
-	ch = plci->NL.IndCh;
-	a = plci->adapter;
-	ncci = a->ch_ncci[ch];
-	Id = (((dword)(ncci ? ncci : ch)) << 16) | (((word) plci->Id) << 8) | a->Id;
-	if (plci->tel) Id |= EXT_CONTROLLER;
-	APPLptr = plci->appl;
-	dbug(1, dprintf("NL_IND-Id(NL:0x%x)=0x%08lx,plci=%x,tel=%x,state=0x%x,ch=0x%x,chs=%d,Ind=%x",
-			plci->NL.Id, Id, plci->Id, plci->tel, plci->State, ch, plci->channels, plci->NL.Ind & 0x0f));
-
-	/* in the case if no connect_active_Ind was sent to the appl we wait for */
-
-	if (plci->nl_remove_id)
-	{
-		plci->NL.RNR = 2; /* discard */
-		dbug(1, dprintf("NL discard while remove pending"));
-		return;
-	}
-	if ((plci->NL.Ind & 0x0f) == N_CONNECT)
-	{
-		if (plci->State == INC_DIS_PENDING
-		    || plci->State == OUTG_DIS_PENDING
-		    || plci->State == IDLE)
-		{
-			plci->NL.RNR = 2; /* discard */
-			dbug(1, dprintf("discard n_connect"));
-			return;
-		}
-		if (plci->State < INC_ACT_PENDING)
-		{
-			plci->NL.RNR = 1; /* flow control */
-			channel_x_off(plci, ch, N_XON_CONNECT_IND);
-			return;
-		}
-	}
-
-	if (!APPLptr)                         /* no application or invalid data */
-	{                                    /* while reloading the DSP        */
-		dbug(1, dprintf("discard1"));
-		plci->NL.RNR = 2;
-		return;
-	}
-
-	if (((plci->NL.Ind & 0x0f) == N_UDATA)
-	    && (((plci->B2_prot != B2_SDLC) && ((plci->B1_resource == 17) || (plci->B1_resource == 18)))
-		|| (plci->B2_prot == 7)
-		|| (plci->B3_prot == 7)))
-	{
-		plci->ncpi_buffer[0] = 0;
-
-		ncpi_state = plci->ncpi_state;
-		if (plci->NL.complete == 1)
-		{
-			byte *data = &plci->NL.RBuffer->P[0];
-
-			if ((plci->NL.RBuffer->length >= 12)
-			    && ((*data == DSP_UDATA_INDICATION_DCD_ON)
-				|| (*data == DSP_UDATA_INDICATION_CTS_ON)))
-			{
-				word conn_opt, ncpi_opt = 0x00;
-/*      HexDump ("MDM N_UDATA:", plci->NL.RBuffer->length, data); */
-
-				if (*data == DSP_UDATA_INDICATION_DCD_ON)
-					plci->ncpi_state |= NCPI_MDM_DCD_ON_RECEIVED;
-				if (*data == DSP_UDATA_INDICATION_CTS_ON)
-					plci->ncpi_state |= NCPI_MDM_CTS_ON_RECEIVED;
-
-				data++;    /* indication code */
-				data += 2; /* timestamp */
-				if ((*data == DSP_CONNECTED_NORM_V18) || (*data == DSP_CONNECTED_NORM_VOWN))
-					ncpi_state &= ~(NCPI_MDM_DCD_ON_RECEIVED | NCPI_MDM_CTS_ON_RECEIVED);
-				data++;    /* connected norm */
-				conn_opt = GET_WORD(data);
-				data += 2; /* connected options */
-
-				PUT_WORD(&(plci->ncpi_buffer[1]), (word)(GET_DWORD(data) & 0x0000FFFF));
-
-				if (conn_opt & DSP_CONNECTED_OPTION_MASK_V42)
-				{
-					ncpi_opt |= MDM_NCPI_ECM_V42;
-				}
-				else if (conn_opt & DSP_CONNECTED_OPTION_MASK_MNP)
-				{
-					ncpi_opt |= MDM_NCPI_ECM_MNP;
-				}
-				else
-				{
-					ncpi_opt |= MDM_NCPI_TRANSPARENT;
-				}
-				if (conn_opt & DSP_CONNECTED_OPTION_MASK_COMPRESSION)
-				{
-					ncpi_opt |= MDM_NCPI_COMPRESSED;
-				}
-				PUT_WORD(&(plci->ncpi_buffer[3]), ncpi_opt);
-				plci->ncpi_buffer[0] = 4;
-
-				plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND | NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
-			}
-		}
-		if (plci->B3_prot == 7)
-		{
-			if (((a->ncci_state[ncci] == INC_ACT_PENDING) || (a->ncci_state[ncci] == OUTG_CON_PENDING))
-			    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-			    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-			{
-				a->ncci_state[ncci] = INC_ACT_PENDING;
-				sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-				plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-			}
-		}
-
-		if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-		      & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
-		    || !(ncpi_state & NCPI_MDM_DCD_ON_RECEIVED)
-		    || !(ncpi_state & NCPI_MDM_CTS_ON_RECEIVED))
-
-		{
-			plci->NL.RNR = 2;
-			return;
-		}
-	}
-
-	if (plci->NL.complete == 2)
-	{
-		if (((plci->NL.Ind & 0x0f) == N_UDATA)
-		    && !(udata_forwarding_table[plci->RData[0].P[0] >> 5] & (1L << (plci->RData[0].P[0] & 0x1f))))
-		{
-			switch (plci->RData[0].P[0])
-			{
-
-			case DTMF_UDATA_INDICATION_FAX_CALLING_TONE:
-				if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
-					sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01X");
-				break;
-			case DTMF_UDATA_INDICATION_ANSWER_TONE:
-				if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
-					sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01Y");
-				break;
-			case DTMF_UDATA_INDICATION_DIGITS_RECEIVED:
-				dtmf_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
-				break;
-			case DTMF_UDATA_INDICATION_DIGITS_SENT:
-				dtmf_confirmation(Id, plci);
-				break;
-
-
-			case UDATA_INDICATION_MIXER_TAP_DATA:
-				capidtmf_recv_process_block(&(plci->capidtmf_state), plci->RData[0].P + 1, (word)(plci->RData[0].PLength - 1));
-				i = capidtmf_indication(&(plci->capidtmf_state), dtmf_code_buffer + 1);
-				if (i != 0)
-				{
-					dtmf_code_buffer[0] = DTMF_UDATA_INDICATION_DIGITS_RECEIVED;
-					dtmf_indication(Id, plci, dtmf_code_buffer, (word)(i + 1));
-				}
-				break;
-
-
-			case UDATA_INDICATION_MIXER_COEFS_SET:
-				mixer_indication_coefs_set(Id, plci);
-				break;
-			case UDATA_INDICATION_XCONNECT_FROM:
-				mixer_indication_xconnect_from(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
-				break;
-			case UDATA_INDICATION_XCONNECT_TO:
-				mixer_indication_xconnect_to(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
-				break;
-
-
-			case LEC_UDATA_INDICATION_DISABLE_DETECT:
-				ec_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
-				break;
-
-
-
-			default:
-				break;
-			}
-		}
-		else
-		{
-			if ((plci->RData[0].PLength != 0)
-			    && ((plci->B2_prot == B2_V120_ASYNC)
-				|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
-				|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
-			{
-
-				sendf(plci->appl, _DATA_B3_I, Id, 0,
-				      "dwww",
-				      plci->RData[1].P,
-				      (plci->NL.RNum < 2) ? 0 : plci->RData[1].PLength,
-				      plci->RNum,
-				      plci->RFlags);
-
-			}
-			else
-			{
-
-				sendf(plci->appl, _DATA_B3_I, Id, 0,
-				      "dwww",
-				      plci->RData[0].P,
-				      plci->RData[0].PLength,
-				      plci->RNum,
-				      plci->RFlags);
-
-			}
-		}
-		return;
-	}
-
-	fax_feature_bits = 0;
-	if ((plci->NL.Ind & 0x0f) == N_CONNECT ||
-	    (plci->NL.Ind & 0x0f) == N_CONNECT_ACK ||
-	    (plci->NL.Ind & 0x0f) == N_DISC ||
-	    (plci->NL.Ind & 0x0f) == N_EDATA ||
-	    (plci->NL.Ind & 0x0f) == N_DISC_ACK)
-	{
-		info = 0;
-		plci->ncpi_buffer[0] = 0;
-		switch (plci->B3_prot) {
-		case  0: /*XPARENT*/
-		case  1: /*T.90 NL*/
-			break;    /* no network control protocol info - jfr */
-		case  2: /*ISO8202*/
-		case  3: /*X25 DCE*/
-			for (i = 0; i < plci->NL.RLength; i++) plci->ncpi_buffer[4 + i] = plci->NL.RBuffer->P[i];
-			plci->ncpi_buffer[0] = (byte)(i + 3);
-			plci->ncpi_buffer[1] = (byte)(plci->NL.Ind & N_D_BIT ? 1 : 0);
-			plci->ncpi_buffer[2] = 0;
-			plci->ncpi_buffer[3] = 0;
-			break;
-		case  4: /*T.30 - FAX*/
-		case  5: /*T.30 - FAX*/
-			if (plci->NL.RLength >= sizeof(T30_INFO))
-			{
-				dbug(1, dprintf("FaxStatus %04x", ((T30_INFO *)plci->NL.RBuffer->P)->code));
-				len = 9;
-				PUT_WORD(&(plci->ncpi_buffer[1]), ((T30_INFO *)plci->NL.RBuffer->P)->rate_div_2400 * 2400);
-				fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
-				i = (((T30_INFO *)plci->NL.RBuffer->P)->resolution & T30_RESOLUTION_R8_0770_OR_200) ? 0x0001 : 0x0000;
-				if (plci->B3_prot == 5)
-				{
-					if (!(fax_feature_bits & T30_FEATURE_BIT_ECM))
-						i |= 0x8000; /* This is not an ECM connection */
-					if (fax_feature_bits & T30_FEATURE_BIT_T6_CODING)
-						i |= 0x4000; /* This is a connection with MMR compression */
-					if (fax_feature_bits & T30_FEATURE_BIT_2D_CODING)
-						i |= 0x2000; /* This is a connection with MR compression */
-					if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
-						i |= 0x0004; /* More documents */
-					if (fax_feature_bits & T30_FEATURE_BIT_POLLING)
-						i |= 0x0002; /* Fax-polling indication */
-				}
-				dbug(1, dprintf("FAX Options %04x %04x", fax_feature_bits, i));
-				PUT_WORD(&(plci->ncpi_buffer[3]), i);
-				PUT_WORD(&(plci->ncpi_buffer[5]), ((T30_INFO *)plci->NL.RBuffer->P)->data_format);
-				plci->ncpi_buffer[7] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_low;
-				plci->ncpi_buffer[8] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_high;
-				plci->ncpi_buffer[len] = 0;
-				if (((T30_INFO *)plci->NL.RBuffer->P)->station_id_len)
-				{
-					plci->ncpi_buffer[len] = 20;
-					for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
-						plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i];
-				}
-				if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
-				{
-					if (((T30_INFO *)plci->NL.RBuffer->P)->code < ARRAY_SIZE(fax_info))
-						info = fax_info[((T30_INFO *)plci->NL.RBuffer->P)->code];
-					else
-						info = _FAX_PROTOCOL_ERROR;
-				}
-
-				if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
-				    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
-				{
-					i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len;
-					while (i < plci->NL.RBuffer->length)
-						plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
-				}
-
-				plci->ncpi_buffer[0] = len;
-				fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
-				PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low, fax_feature_bits);
-
-				plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND;
-				if (((plci->NL.Ind & 0x0f) == N_CONNECT_ACK)
-				    || (((plci->NL.Ind & 0x0f) == N_CONNECT)
-					&& (fax_feature_bits & T30_FEATURE_BIT_POLLING))
-				    || (((plci->NL.Ind & 0x0f) == N_EDATA)
-					&& ((((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_TRAIN_OK)
-					    || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
-					    || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DTC))))
-				{
-					plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT;
-				}
-				if (((plci->NL.Ind & 0x0f) == N_DISC)
-				    || ((plci->NL.Ind & 0x0f) == N_DISC_ACK)
-				    || (((plci->NL.Ind & 0x0f) == N_EDATA)
-					&& (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_EOP_CAPI)))
-				{
-					plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
-				}
-			}
-			break;
-
-		case B3_RTP:
-			if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
-			{
-				if (plci->NL.RLength != 0)
-				{
-					info = rtp_info[plci->NL.RBuffer->P[0]];
-					plci->ncpi_buffer[0] = plci->NL.RLength - 1;
-					for (i = 1; i < plci->NL.RLength; i++)
-						plci->ncpi_buffer[i] = plci->NL.RBuffer->P[i];
-				}
-			}
-			break;
-
-		}
-		plci->NL.RNR = 2;
-	}
-	switch (plci->NL.Ind & 0x0f) {
-	case N_EDATA:
-		if ((plci->B3_prot == 4) || (plci->B3_prot == 5))
-		{
-			dbug(1, dprintf("EDATA ncci=0x%x state=%d code=%02x", ncci, a->ncci_state[ncci],
-					((T30_INFO *)plci->NL.RBuffer->P)->code));
-			fax_send_edata_ack = (((T30_INFO *)(plci->fax_connect_info_buffer))->operating_mode == T30_OPERATING_MODE_CAPI_NEG);
-
-			if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
-			    && (plci->nsf_control_bits & (T30_NSF_CONTROL_BIT_NEGOTIATE_IND | T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
-			    && (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
-			    && (a->ncci_state[ncci] == OUTG_CON_PENDING)
-			    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-			    && !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
-			{
-				((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
-				sendf(plci->appl, _MANUFACTURER_I, Id, 0, "dwbS", _DI_MANU_ID, _DI_NEGOTIATE_B3,
-				      (byte)(plci->ncpi_buffer[0] + 1), plci->ncpi_buffer);
-				plci->ncpi_state |= NCPI_NEGOTIATE_B3_SENT;
-				if (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)
-					fax_send_edata_ack = false;
-			}
-
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-			{
-				switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
-				{
-				case EDATA_T30_DIS:
-					if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
-					    && !(GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & T30_CONTROL_BIT_REQUEST_POLLING)
-					    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-					    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-					{
-						a->ncci_state[ncci] = INC_ACT_PENDING;
-						if (plci->B3_prot == 4)
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-						else
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-						plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-					}
-					break;
-
-				case EDATA_T30_TRAIN_OK:
-					if ((a->ncci_state[ncci] == INC_ACT_PENDING)
-					    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-					    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-					{
-						if (plci->B3_prot == 4)
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-						else
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-						plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-					}
-					break;
-
-				case EDATA_T30_EOP_CAPI:
-					if (a->ncci_state[ncci] == CONNECTED)
-					{
-						sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", GOOD, plci->ncpi_buffer);
-						a->ncci_state[ncci] = INC_DIS_PENDING;
-						plci->ncpi_state = 0;
-						fax_send_edata_ack = false;
-					}
-					break;
-				}
-			}
-			else
-			{
-				switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
-				{
-				case EDATA_T30_TRAIN_OK:
-					if ((a->ncci_state[ncci] == INC_ACT_PENDING)
-					    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-					    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-					{
-						if (plci->B3_prot == 4)
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-						else
-							sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-						plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-					}
-					break;
-				}
-			}
-			if (fax_send_edata_ack)
-			{
-				((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
-				plci->fax_edata_ack_length = 1;
-				start_internal_command(Id, plci, fax_edata_ack_command);
-			}
-		}
-		else
-		{
-			dbug(1, dprintf("EDATA ncci=0x%x state=%d", ncci, a->ncci_state[ncci]));
-		}
-		break;
-	case N_CONNECT:
-		if (!a->ch_ncci[ch])
-		{
-			ncci = get_ncci(plci, ch, 0);
-			Id = (Id & 0xffff) | (((dword) ncci) << 16);
-		}
-		dbug(1, dprintf("N_CONNECT: ch=%d state=%d plci=%lx plci_Id=%lx plci_State=%d",
-				ch, a->ncci_state[ncci], a->ncci_plci[ncci], plci->Id, plci->State));
-
-		msg = _CONNECT_B3_I;
-		if (a->ncci_state[ncci] == IDLE)
-			plci->channels++;
-		else if (plci->B3_prot == 1)
-			msg = _CONNECT_B3_T90_ACTIVE_I;
-
-		a->ncci_state[ncci] = INC_CON_PENDING;
-		if (plci->B3_prot == 4)
-			sendf(plci->appl, msg, Id, 0, "s", "");
-		else
-			sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
-		break;
-	case N_CONNECT_ACK:
-		dbug(1, dprintf("N_connect_Ack"));
-		if (plci->internal_command_queue[0]
-		    && ((plci->adjust_b_state == ADJUST_B_CONNECT_2)
-			|| (plci->adjust_b_state == ADJUST_B_CONNECT_3)
-			|| (plci->adjust_b_state == ADJUST_B_CONNECT_4)))
-		{
-			(*(plci->internal_command_queue[0]))(Id, plci, 0);
-			if (!plci->internal_command)
-				next_internal_command(Id, plci);
-			break;
-		}
-		msg = _CONNECT_B3_ACTIVE_I;
-		if (plci->B3_prot == 1)
-		{
-			if (a->ncci_state[ncci] != OUTG_CON_PENDING)
-				msg = _CONNECT_B3_T90_ACTIVE_I;
-			a->ncci_state[ncci] = INC_ACT_PENDING;
-			sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
-		}
-		else if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
-		{
-			if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
-			    && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-			    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-			{
-				a->ncci_state[ncci] = INC_ACT_PENDING;
-				if (plci->B3_prot == 4)
-					sendf(plci->appl, msg, Id, 0, "s", "");
-				else
-					sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
-				plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-			}
-		}
-		else
-		{
-			a->ncci_state[ncci] = INC_ACT_PENDING;
-			sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
-		}
-		if (plci->adjust_b_restore)
-		{
-			plci->adjust_b_restore = false;
-			start_internal_command(Id, plci, adjust_b_restore);
-		}
-		break;
-	case N_DISC:
-	case N_DISC_ACK:
-		if (plci->internal_command_queue[0]
-		    && ((plci->internal_command == FAX_DISCONNECT_COMMAND_1)
-			|| (plci->internal_command == FAX_DISCONNECT_COMMAND_2)
-			|| (plci->internal_command == FAX_DISCONNECT_COMMAND_3)))
-		{
-			(*(plci->internal_command_queue[0]))(Id, plci, 0);
-			if (!plci->internal_command)
-				next_internal_command(Id, plci);
-		}
-		ncci_state = a->ncci_state[ncci];
-		ncci_remove(plci, ncci, false);
-
-		/* with N_DISC or N_DISC_ACK the IDI frees the respective   */
-		/* channel, so we cannot store the state in ncci_state! The */
-		/* information on which channel we received an N_DISC is    */
-		/* stored in the inc_dis_ncci_table buffer.                 */
-		for (i = 0; plci->inc_dis_ncci_table[i]; i++);
-		plci->inc_dis_ncci_table[i] = (byte) ncci;
-
-		/* need a connect_b3_ind before a disconnect_b3_ind with FAX */
-		if (!plci->channels
-		    && (plci->B1_resource == 16)
-		    && (plci->State <= CONNECTED))
-		{
-			len = 9;
-			i = ((T30_INFO *)plci->fax_connect_info_buffer)->rate_div_2400 * 2400;
-			PUT_WORD(&plci->ncpi_buffer[1], i);
-			PUT_WORD(&plci->ncpi_buffer[3], 0);
-			i = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
-			PUT_WORD(&plci->ncpi_buffer[5], i);
-			PUT_WORD(&plci->ncpi_buffer[7], 0);
-			plci->ncpi_buffer[len] = 0;
-			plci->ncpi_buffer[0] = len;
-			if (plci->B3_prot == 4)
-				sendf(plci->appl, _CONNECT_B3_I, Id, 0, "s", "");
-			else
-			{
-
-				if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
-				    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
-				{
-					plci->ncpi_buffer[++len] = 0;
-					plci->ncpi_buffer[++len] = 0;
-					plci->ncpi_buffer[++len] = 0;
-					plci->ncpi_buffer[0] = len;
-				}
-
-				sendf(plci->appl, _CONNECT_B3_I, Id, 0, "S", plci->ncpi_buffer);
-			}
-			sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
-			plci->ncpi_state = 0;
-			sig_req(plci, HANGUP, 0);
-			send_req(plci);
-			plci->State = OUTG_DIS_PENDING;
-			/* disc here */
-		}
-		else if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-			 && ((plci->B3_prot == 4) || (plci->B3_prot == 5))
-			 && ((ncci_state == INC_DIS_PENDING) || (ncci_state == IDLE)))
-		{
-			if (ncci_state == IDLE)
-			{
-				if (plci->channels)
-					plci->channels--;
-				if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
-					if (plci->State == SUSPENDING) {
-						sendf(plci->appl,
-						      _FACILITY_I,
-						      Id & 0xffffL,
-						      0,
-						      "ws", (word)3, "\x03\x04\x00\x00");
-						sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
-					}
-					plci_remove(plci);
-					plci->State = IDLE;
-				}
-			}
-		}
-		else if (plci->channels)
-		{
-			sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
-			plci->ncpi_state = 0;
-			if ((ncci_state == OUTG_REJ_PENDING)
-			    && ((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE)))
-			{
-				sig_req(plci, HANGUP, 0);
-				send_req(plci);
-				plci->State = OUTG_DIS_PENDING;
-			}
-		}
-		break;
-	case N_RESET:
-		a->ncci_state[ncci] = INC_RES_PENDING;
-		sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
-		break;
-	case N_RESET_ACK:
-		a->ncci_state[ncci] = CONNECTED;
-		sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
-		break;
-
-	case N_UDATA:
-		if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f))))
-		{
-			plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3);
-			plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE;
-			plci->NL.R = plci->RData;
-			plci->NL.RNum = 1;
-			return;
-		}
-		/* fall through */
-	case N_BDATA:
-	case N_DATA:
-		if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */
-		    || (a->ncci_state[ncci] == IDLE)
-		    || (a->ncci_state[ncci] == INC_DIS_PENDING))
-		{
-			plci->NL.RNR = 2;
-			break;
-		}
-		if ((a->ncci_state[ncci] != CONNECTED)
-		    && (a->ncci_state[ncci] != OUTG_DIS_PENDING)
-		    && (a->ncci_state[ncci] != OUTG_REJ_PENDING))
-		{
-			dbug(1, dprintf("flow control"));
-			plci->NL.RNR = 1; /* flow control  */
-			channel_x_off(plci, ch, 0);
-			break;
-		}
-
-		NCCIcode = ncci | (((word)a->Id) << 8);
-
-		/* count all buffers within the Application pool    */
-		/* belonging to the same NCCI. If this is below the */
-		/* number of buffers available per NCCI we accept   */
-		/* this packet, otherwise we reject it              */
-		count = 0;
-		Num = 0xffff;
-		for (i = 0; i < APPLptr->MaxBuffer; i++) {
-			if (NCCIcode == APPLptr->DataNCCI[i]) count++;
-			if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
-		}
-
-		if (count >= APPLptr->MaxNCCIData || Num == 0xffff)
-		{
-			dbug(3, dprintf("Flow-Control"));
-			plci->NL.RNR = 1;
-			if (++(APPLptr->NCCIDataFlowCtrlTimer) >=
-			    (word)((a->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) ? 40 : 2000))
-			{
-				plci->NL.RNR = 2;
-				dbug(3, dprintf("DiscardData"));
-			} else {
-				channel_x_off(plci, ch, 0);
-			}
-			break;
-		}
-		else
-		{
-			APPLptr->NCCIDataFlowCtrlTimer = 0;
-		}
-
-		plci->RData[0].P = ReceiveBufferGet(APPLptr, Num);
-		if (!plci->RData[0].P) {
-			plci->NL.RNR = 1;
-			channel_x_off(plci, ch, 0);
-			break;
-		}
-
-		APPLptr->DataNCCI[Num] = NCCIcode;
-		APPLptr->DataFlags[Num] = (plci->Id << 8) | (plci->NL.Ind >> 4);
-		dbug(3, dprintf("Buffer(%d), Max = %d", Num, APPLptr->MaxBuffer));
-
-		plci->RNum = Num;
-		plci->RFlags = plci->NL.Ind >> 4;
-		plci->RData[0].PLength = APPLptr->MaxDataLength;
-		plci->NL.R = plci->RData;
-		if ((plci->NL.RLength != 0)
-		    && ((plci->B2_prot == B2_V120_ASYNC)
-			|| (plci->B2_prot == B2_V120_ASYNC_V42BIS)
-			|| (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
-		{
-			plci->RData[1].P = plci->RData[0].P;
-			plci->RData[1].PLength = plci->RData[0].PLength;
-			plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
-			if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
-				plci->RData[0].PLength = 1;
-			else
-				plci->RData[0].PLength = 2;
-			if (plci->NL.RBuffer->P[0] & V120_HEADER_BREAK_BIT)
-				plci->RFlags |= 0x0010;
-			if (plci->NL.RBuffer->P[0] & (V120_HEADER_C1_BIT | V120_HEADER_C2_BIT))
-				plci->RFlags |= 0x8000;
-			plci->NL.RNum = 2;
-		}
-		else
-		{
-			if ((plci->NL.Ind & 0x0f) == N_UDATA)
-				plci->RFlags |= 0x0010;
-
-			else if ((plci->B3_prot == B3_RTP) && ((plci->NL.Ind & 0x0f) == N_BDATA))
-				plci->RFlags |= 0x0001;
-
-			plci->NL.RNum = 1;
-		}
-		break;
-	case N_DATA_ACK:
-		data_ack(plci, ch);
-		break;
-	default:
-		plci->NL.RNR = 2;
-		break;
-	}
-}
-
-/*------------------------------------------------------------------*/
-/* find a free PLCI */
-/*------------------------------------------------------------------*/
-
-static word get_plci(DIVA_CAPI_ADAPTER *a)
-{
-	word i, j;
-	PLCI *plci;
-
-	for (i = 0; i < a->max_plci && a->plci[i].Id; i++);
-	if (i == a->max_plci) {
-		dbug(1, dprintf("get_plci: out of PLCIs"));
-		return 0;
-	}
-	plci = &a->plci[i];
-	plci->Id = (byte)(i + 1);
-
-	plci->Sig.Id = 0;
-	plci->NL.Id = 0;
-	plci->sig_req = 0;
-	plci->nl_req = 0;
-
-	plci->appl = NULL;
-	plci->relatedPTYPLCI = NULL;
-	plci->State = IDLE;
-	plci->SuppState = IDLE;
-	plci->channels = 0;
-	plci->tel = 0;
-	plci->B1_resource = 0;
-	plci->B2_prot = 0;
-	plci->B3_prot = 0;
-
-	plci->command = 0;
-	plci->m_command = 0;
-	init_internal_command_queue(plci);
-	plci->number = 0;
-	plci->req_in_start = 0;
-	plci->req_in = 0;
-	plci->req_out = 0;
-	plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
-	plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
-	plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-
-	plci->data_sent = false;
-	plci->send_disc = 0;
-	plci->sig_global_req = 0;
-	plci->sig_remove_id = 0;
-	plci->nl_global_req = 0;
-	plci->nl_remove_id = 0;
-	plci->adv_nl = 0;
-	plci->manufacturer = false;
-	plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
-	plci->spoofed_msg = 0;
-	plci->ptyState = 0;
-	plci->cr_enquiry = false;
-	plci->hangup_flow_ctrl_timer = 0;
-
-	plci->ncci_ring_list = 0;
-	for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0;
-	bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
-	bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
-	plci->fax_connect_info_length = 0;
-	plci->nsf_control_bits = 0;
-	plci->ncpi_state = 0x00;
-	plci->ncpi_buffer[0] = 0;
-
-	plci->requested_options_conn = 0;
-	plci->requested_options = 0;
-	plci->notifiedcall = 0;
-	plci->vswitchstate = 0;
-	plci->vsprot = 0;
-	plci->vsprotdialect = 0;
-	init_b1_config(plci);
-	dbug(1, dprintf("get_plci(%x)", plci->Id));
-	return i + 1;
-}
-
-/*------------------------------------------------------------------*/
-/* put a parameter in the parameter buffer                          */
-/*------------------------------------------------------------------*/
-
-static void add_p(PLCI *plci, byte code, byte *p)
-{
-	word p_length;
-
-	p_length = 0;
-	if (p) p_length = p[0];
-	add_ie(plci, code, p, p_length);
-}
-
-/*------------------------------------------------------------------*/
-/* put a structure in the parameter buffer                          */
-/*------------------------------------------------------------------*/
-static void add_s(PLCI *plci, byte code, API_PARSE *p)
-{
-	if (p) add_ie(plci, code, p->info, (word)p->length);
-}
-
-/*------------------------------------------------------------------*/
-/* put multiple structures in the parameter buffer                  */
-/*------------------------------------------------------------------*/
-static void add_ss(PLCI *plci, byte code, API_PARSE *p)
-{
-	byte i;
-
-	if (p) {
-		dbug(1, dprintf("add_ss(%x,len=%d)", code, p->length));
-		for (i = 2; i < (byte)p->length; i += p->info[i] + 2) {
-			dbug(1, dprintf("add_ss_ie(%x,len=%d)", p->info[i - 1], p->info[i]));
-			add_ie(plci, p->info[i - 1], (byte *)&(p->info[i]), (word)p->info[i]);
-		}
-	}
-}
-
-/*------------------------------------------------------------------*/
-/* return the channel number sent by the application in an esc_chi  */
-/*------------------------------------------------------------------*/
-static byte getChannel(API_PARSE *p)
-{
-	byte i;
-
-	if (p) {
-		for (i = 2; i < (byte)p->length; i += p->info[i] + 2) {
-			if (p->info[i] == 2) {
-				if (p->info[i - 1] == ESC && p->info[i + 1] == CHI) return (p->info[i + 2]);
-			}
-		}
-	}
-	return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* put an information element in the parameter buffer               */
-/*------------------------------------------------------------------*/
-
-static void add_ie(PLCI *plci, byte code, byte *p, word p_length)
-{
-	word i;
-
-	if (!(code & 0x80) && !p_length) return;
-
-	if (plci->req_in == plci->req_in_start) {
-		plci->req_in += 2;
-	}
-	else {
-		plci->req_in--;
-	}
-	plci->RBuffer[plci->req_in++] = code;
-
-	if (p) {
-		plci->RBuffer[plci->req_in++] = (byte)p_length;
-		for (i = 0; i < p_length; i++) plci->RBuffer[plci->req_in++] = p[1 + i];
-	}
-
-	plci->RBuffer[plci->req_in++] = 0;
-}
-
-/*------------------------------------------------------------------*/
-/* put unstructured data into the buffer                            */
-/*------------------------------------------------------------------*/
-
-static void add_d(PLCI *plci, word length, byte *p)
-{
-	word i;
-
-	if (plci->req_in == plci->req_in_start) {
-		plci->req_in += 2;
-	}
-	else {
-		plci->req_in--;
-	}
-	for (i = 0; i < length; i++) plci->RBuffer[plci->req_in++] = p[i];
-}
-
-/*------------------------------------------------------------------*/
-/* put parameters from the Additional Info parameter in the         */
-/* parameter buffer                                                 */
-/*------------------------------------------------------------------*/
-
-static void add_ai(PLCI *plci, API_PARSE *ai)
-{
-	word i;
-	API_PARSE ai_parms[5];
-
-	for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
-	if (!ai->length)
-		return;
-	if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
-		return;
-
-	add_s(plci, KEY, &ai_parms[1]);
-	add_s(plci, UUI, &ai_parms[2]);
-	add_ss(plci, FTY, &ai_parms[3]);
-}
-
-/*------------------------------------------------------------------*/
-/* put parameter for b1 protocol in the parameter buffer            */
-/*------------------------------------------------------------------*/
-
-static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
-		   word b1_facilities)
-{
-	API_PARSE bp_parms[8];
-	API_PARSE mdm_cfg[9];
-	API_PARSE global_config[2];
-	byte cai[256];
-	byte resource[] = {5, 9, 13, 12, 16, 39, 9, 17, 17, 18};
-	byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
-	word i;
-
-	API_PARSE mdm_cfg_v18[4];
-	word j, n, w;
-	dword d;
-
-
-	for (i = 0; i < 8; i++) bp_parms[i].length = 0;
-	for (i = 0; i < 2; i++) global_config[i].length = 0;
-
-	dbug(1, dprintf("add_b1"));
-	api_save_msg(bp, "s", &plci->B_protocol);
-
-	if (b_channel_info == 2) {
-		plci->B1_resource = 0;
-		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
-		add_p(plci, CAI, "\x01\x00");
-		dbug(1, dprintf("Cai=1,0 (no resource)"));
-		return 0;
-	}
-
-	if (plci->tel == CODEC_PERMANENT) return 0;
-	else if (plci->tel == CODEC) {
-		plci->B1_resource = 1;
-		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
-		add_p(plci, CAI, "\x01\x01");
-		dbug(1, dprintf("Cai=1,1 (Codec)"));
-		return 0;
-	}
-	else if (plci->tel == ADV_VOICE) {
-		plci->B1_resource = add_b1_facilities(plci, 9, (word)(b1_facilities | B1_FACILITY_VOICE));
-		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities | B1_FACILITY_VOICE));
-		voice_cai[1] = plci->B1_resource;
-		PUT_WORD(&voice_cai[5], plci->appl->MaxDataLength);
-		add_p(plci, CAI, voice_cai);
-		dbug(1, dprintf("Cai=1,0x%x (AdvVoice)", voice_cai[1]));
-		return 0;
-	}
-	plci->call_dir &= ~(CALL_DIR_ORIGINATE | CALL_DIR_ANSWER);
-	if (plci->call_dir & CALL_DIR_OUT)
-		plci->call_dir |= CALL_DIR_ORIGINATE;
-	else if (plci->call_dir & CALL_DIR_IN)
-		plci->call_dir |= CALL_DIR_ANSWER;
-
-	if (!bp->length) {
-		plci->B1_resource = 0x5;
-		adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
-		add_p(plci, CAI, "\x01\x05");
-		return 0;
-	}
-
-	dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
-	if (bp->length > 256) return _WRONG_MESSAGE_FORMAT;
-	if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
-	{
-		bp_parms[6].length = 0;
-		if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
-		{
-			dbug(1, dprintf("b-form.!"));
-			return _WRONG_MESSAGE_FORMAT;
-		}
-	}
-	else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
-	{
-		dbug(1, dprintf("b-form.!"));
-		return _WRONG_MESSAGE_FORMAT;
-	}
-
-	if (bp_parms[6].length)
-	{
-		if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
-		{
-			return _WRONG_MESSAGE_FORMAT;
-		}
-		switch (GET_WORD(global_config[0].info))
-		{
-		case 1:
-			plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
-			break;
-		case 2:
-			plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
-			break;
-		}
-	}
-	dbug(1, dprintf("call_dir=%04x", plci->call_dir));
-
-
-	if ((GET_WORD(bp_parms[0].info) == B1_RTP)
-	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
-	{
-		plci->B1_resource = add_b1_facilities(plci, 31, (word)(b1_facilities & ~B1_FACILITY_VOICE));
-		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
-		cai[1] = plci->B1_resource;
-		cai[2] = 0;
-		cai[3] = 0;
-		cai[4] = 0;
-		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
-		for (i = 0; i < bp_parms[3].length; i++)
-			cai[7 + i] = bp_parms[3].info[1 + i];
-		cai[0] = 6 + bp_parms[3].length;
-		add_p(plci, CAI, cai);
-		return 0;
-	}
-
-
-	if ((GET_WORD(bp_parms[0].info) == B1_PIAFS)
-	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))
-	{
-		plci->B1_resource = add_b1_facilities(plci, 35/* PIAFS HARDWARE FACILITY */, (word)(b1_facilities & ~B1_FACILITY_VOICE));
-		adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
-		cai[1] = plci->B1_resource;
-		cai[2] = 0;
-		cai[3] = 0;
-		cai[4] = 0;
-		PUT_WORD(&cai[5], plci->appl->MaxDataLength);
-		cai[0] = 6;
-		add_p(plci, CAI, cai);
-		return 0;
-	}
-
-
-	if ((GET_WORD(bp_parms[0].info) >= 32)
-	    || (!((1L << GET_WORD(bp_parms[0].info)) & plci->adapter->profile.B1_Protocols)
-		&& ((GET_WORD(bp_parms[0].info) != 3)
-		    || !((1L << B1_HDLC) & plci->adapter->profile.B1_Protocols)
-		    || ((bp_parms[3].length != 0) && (GET_WORD(&bp_parms[3].info[1]) != 0) && (GET_WORD(&bp_parms[3].info[1]) != 56000)))))
-	{
-		return _B1_NOT_SUPPORTED;
-	}
-	plci->B1_resource = add_b1_facilities(plci, resource[GET_WORD(bp_parms[0].info)],
-					      (word)(b1_facilities & ~B1_FACILITY_VOICE));
-	adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
-	cai[0] = 6;
-	cai[1] = plci->B1_resource;
-	for (i = 2; i < sizeof(cai); i++) cai[i] = 0;
-
-	if ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
-	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
-	    || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC))
-	{ /* B1 - modem */
-		for (i = 0; i < 7; i++) mdm_cfg[i].length = 0;
-
-		if (bp_parms[3].length)
-		{
-			if (api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwww", mdm_cfg))
-			{
-				return (_WRONG_MESSAGE_FORMAT);
-			}
-
-			cai[2] = 0; /* Bit rate for adaptation */
-
-			dbug(1, dprintf("MDM Max Bit Rate:<%d>", GET_WORD(mdm_cfg[0].info)));
-
-			PUT_WORD(&cai[13], 0);                          /* Min Tx speed */
-			PUT_WORD(&cai[15], GET_WORD(mdm_cfg[0].info)); /* Max Tx speed */
-			PUT_WORD(&cai[17], 0);                          /* Min Rx speed */
-			PUT_WORD(&cai[19], GET_WORD(mdm_cfg[0].info)); /* Max Rx speed */
-
-			cai[3] = 0; /* Async framing parameters */
-			switch (GET_WORD(mdm_cfg[2].info))
-			{       /* Parity     */
-			case 1: /* odd parity */
-				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
-				dbug(1, dprintf("MDM: odd parity"));
-				break;
-
-			case 2: /* even parity */
-				cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
-				dbug(1, dprintf("MDM: even parity"));
-				break;
-
-			default:
-				dbug(1, dprintf("MDM: no parity"));
-				break;
-			}
-
-			switch (GET_WORD(mdm_cfg[3].info))
-			{       /* stop bits   */
-			case 1: /* 2 stop bits */
-				cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
-				dbug(1, dprintf("MDM: 2 stop bits"));
-				break;
-
-			default:
-				dbug(1, dprintf("MDM: 1 stop bit"));
-				break;
-			}
-
-			switch (GET_WORD(mdm_cfg[1].info))
-			{     /* char length */
-			case 5:
-				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
-				dbug(1, dprintf("MDM: 5 bits"));
-				break;
-
-			case 6:
-				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
-				dbug(1, dprintf("MDM: 6 bits"));
-				break;
-
-			case 7:
-				cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
-				dbug(1, dprintf("MDM: 7 bits"));
-				break;
-
-			default:
-				dbug(1, dprintf("MDM: 8 bits"));
-				break;
-			}
-
-			cai[7] = 0; /* Line taking options */
-			cai[8] = 0; /* Modulation negotiation options */
-			cai[9] = 0; /* Modulation options */
-
-			if (((plci->call_dir & CALL_DIR_ORIGINATE) != 0) ^ ((plci->call_dir & CALL_DIR_OUT) != 0))
-			{
-				cai[9] |= DSP_CAI_MODEM_REVERSE_DIRECTION;
-				dbug(1, dprintf("MDM: Reverse direction"));
-			}
-
-			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RETRAIN)
-			{
-				cai[9] |= DSP_CAI_MODEM_DISABLE_RETRAIN;
-				dbug(1, dprintf("MDM: Disable retrain"));
-			}
-
-			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RING_TONE)
-			{
-				cai[7] |= DSP_CAI_MODEM_DISABLE_CALLING_TONE | DSP_CAI_MODEM_DISABLE_ANSWER_TONE;
-				dbug(1, dprintf("MDM: Disable ring tone"));
-			}
-
-			if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_1800)
-			{
-				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_1800HZ;
-				dbug(1, dprintf("MDM: 1800 guard tone"));
-			}
-			else if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_550)
-			{
-				cai[8] |= DSP_CAI_MODEM_GUARD_TONE_550HZ;
-				dbug(1, dprintf("MDM: 550 guard tone"));
-			}
-
-			if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_V100)
-			{
-				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_V100;
-				dbug(1, dprintf("MDM: V100"));
-			}
-			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_MOD_CLASS)
-			{
-				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_IN_CLASS;
-				dbug(1, dprintf("MDM: IN CLASS"));
-			}
-			else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_DISABLED)
-			{
-				cai[8] |= DSP_CAI_MODEM_NEGOTIATE_DISABLED;
-				dbug(1, dprintf("MDM: DISABLED"));
-			}
-			cai[0] = 20;
-
-			if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_V18))
-			    && (GET_WORD(mdm_cfg[5].info) & 0x8000)) /* Private V.18 enable */
-			{
-				plci->requested_options |= 1L << PRIVATE_V18;
-			}
-			if (GET_WORD(mdm_cfg[5].info) & 0x4000) /* Private VOWN enable */
-				plci->requested_options |= 1L << PRIVATE_VOWN;
-
-			if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-			    & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
-			{
-				if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwws", mdm_cfg))
-				{
-					i = 27;
-					if (mdm_cfg[6].length >= 4)
-					{
-						d = GET_DWORD(&mdm_cfg[6].info[1]);
-						cai[7] |= (byte) d;          /* line taking options */
-						cai[9] |= (byte)(d >> 8);    /* modulation options */
-						cai[++i] = (byte)(d >> 16);  /* vown modulation options */
-						cai[++i] = (byte)(d >> 24);
-						if (mdm_cfg[6].length >= 8)
-						{
-							d = GET_DWORD(&mdm_cfg[6].info[5]);
-							cai[10] |= (byte) d;        /* disabled modulations mask */
-							cai[11] |= (byte)(d >> 8);
-							if (mdm_cfg[6].length >= 12)
-							{
-								d = GET_DWORD(&mdm_cfg[6].info[9]);
-								cai[12] = (byte) d;          /* enabled modulations mask */
-								cai[++i] = (byte)(d >> 8);   /* vown enabled modulations */
-								cai[++i] = (byte)(d >> 16);
-								cai[++i] = (byte)(d >> 24);
-								cai[++i] = 0;
-								if (mdm_cfg[6].length >= 14)
-								{
-									w = GET_WORD(&mdm_cfg[6].info[13]);
-									if (w != 0)
-										PUT_WORD(&cai[13], w);  /* min tx speed */
-									if (mdm_cfg[6].length >= 16)
-									{
-										w = GET_WORD(&mdm_cfg[6].info[15]);
-										if (w != 0)
-											PUT_WORD(&cai[15], w);  /* max tx speed */
-										if (mdm_cfg[6].length >= 18)
-										{
-											w = GET_WORD(&mdm_cfg[6].info[17]);
-											if (w != 0)
-												PUT_WORD(&cai[17], w);  /* min rx speed */
-											if (mdm_cfg[6].length >= 20)
-											{
-												w = GET_WORD(&mdm_cfg[6].info[19]);
-												if (w != 0)
-													PUT_WORD(&cai[19], w);  /* max rx speed */
-												if (mdm_cfg[6].length >= 22)
-												{
-													w = GET_WORD(&mdm_cfg[6].info[21]);
-													cai[23] = (byte)(-((short) w));  /* transmit level */
-													if (mdm_cfg[6].length >= 24)
-													{
-														w = GET_WORD(&mdm_cfg[6].info[23]);
-														cai[22] |= (byte) w;        /* info options mask */
-														cai[21] |= (byte)(w >> 8);  /* disabled symbol rates */
-													}
-												}
-											}
-										}
-									}
-								}
-							}
-						}
-					}
-					cai[27] = i - 27;
-					i++;
-					if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwwss", mdm_cfg))
-					{
-						if (!api_parse(&mdm_cfg[7].info[1], (word)mdm_cfg[7].length, "sss", mdm_cfg_v18))
-						{
-							for (n = 0; n < 3; n++)
-							{
-								cai[i] = (byte)(mdm_cfg_v18[n].length);
-								for (j = 1; j < ((word)(cai[i] + 1)); j++)
-									cai[i + j] = mdm_cfg_v18[n].info[j];
-								i += cai[i] + 1;
-							}
-						}
-					}
-					cai[0] = (byte)(i - 1);
-				}
-			}
-
-		}
-	}
-	if (GET_WORD(bp_parms[0].info) == 2 ||                         /* V.110 async */
-	    GET_WORD(bp_parms[0].info) == 3)                           /* V.110 sync */
-	{
-		if (bp_parms[3].length) {
-			dbug(1, dprintf("V.110,%d", GET_WORD(&bp_parms[3].info[1])));
-			switch (GET_WORD(&bp_parms[3].info[1])) {                 /* Rate */
-			case 0:
-			case 56000:
-				if (GET_WORD(bp_parms[0].info) == 3) {                  /* V.110 sync 56k */
-					dbug(1, dprintf("56k sync HSCX"));
-					cai[1] = 8;
-					cai[2] = 0;
-					cai[3] = 0;
-				}
-				else if (GET_WORD(bp_parms[0].info) == 2) {
-					dbug(1, dprintf("56k async DSP"));
-					cai[2] = 9;
-				}
-				break;
-			case 50:     cai[2] = 1;  break;
-			case 75:     cai[2] = 1;  break;
-			case 110:    cai[2] = 1;  break;
-			case 150:    cai[2] = 1;  break;
-			case 200:    cai[2] = 1;  break;
-			case 300:    cai[2] = 1;  break;
-			case 600:    cai[2] = 1;  break;
-			case 1200:   cai[2] = 2;  break;
-			case 2400:   cai[2] = 3;  break;
-			case 4800:   cai[2] = 4;  break;
-			case 7200:   cai[2] = 10; break;
-			case 9600:   cai[2] = 5;  break;
-			case 12000:  cai[2] = 13; break;
-			case 24000:  cai[2] = 0;  break;
-			case 14400:  cai[2] = 11; break;
-			case 19200:  cai[2] = 6;  break;
-			case 28800:  cai[2] = 12; break;
-			case 38400:  cai[2] = 7;  break;
-			case 48000:  cai[2] = 8;  break;
-			case 76:     cai[2] = 15; break;  /* 75/1200     */
-			case 1201:   cai[2] = 14; break;  /* 1200/75     */
-			case 56001:  cai[2] = 9;  break;  /* V.110 56000 */
-
-			default:
-				return _B1_PARM_NOT_SUPPORTED;
-			}
-			cai[3] = 0;
-			if (cai[1] == 13)                                        /* v.110 async */
-			{
-				if (bp_parms[3].length >= 8)
-				{
-					switch (GET_WORD(&bp_parms[3].info[3]))
-					{       /* char length */
-					case 5:
-						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
-						break;
-					case 6:
-						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
-						break;
-					case 7:
-						cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
-						break;
-					}
-					switch (GET_WORD(&bp_parms[3].info[5]))
-					{       /* Parity     */
-					case 1: /* odd parity */
-						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
-						break;
-					case 2: /* even parity */
-						cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
-						break;
-					}
-					switch (GET_WORD(&bp_parms[3].info[7]))
-					{       /* stop bits   */
-					case 1: /* 2 stop bits */
-						cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
-						break;
-					}
-				}
-			}
-		}
-		else if (cai[1] == 8 || GET_WORD(bp_parms[0].info) == 3) {
-			dbug(1, dprintf("V.110 default 56k sync"));
-			cai[1] = 8;
-			cai[2] = 0;
-			cai[3] = 0;
-		}
-		else {
-			dbug(1, dprintf("V.110 default 9600 async"));
-			cai[2] = 5;
-		}
-	}
-	PUT_WORD(&cai[5], plci->appl->MaxDataLength);
-	dbug(1, dprintf("CAI[%d]=%x,%x,%x,%x,%x,%x", cai[0], cai[1], cai[2], cai[3], cai[4], cai[5], cai[6]));
-/* HexDump ("CAI", sizeof(cai), &cai[0]); */
-
-	add_p(plci, CAI, cai);
-	return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* put parameters for B2 and B3 protocols in the parameter buffer   */
-/*------------------------------------------------------------------*/
-
-static word add_b23(PLCI *plci, API_PARSE *bp)
-{
-	word i, fax_control_bits;
-	byte pos, len;
-	byte SAPI = 0x40;  /* default SAPI 16 for x.31 */
-	API_PARSE bp_parms[8];
-	API_PARSE *b1_config;
-	API_PARSE *b2_config;
-	API_PARSE b2_config_parms[8];
-	API_PARSE *b3_config;
-	API_PARSE b3_config_parms[6];
-	API_PARSE global_config[2];
-
-	static byte llc[3] = {2,0,0};
-	static byte dlc[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-	static byte nlc[256];
-	static byte lli[12] = {1,1};
-
-	const byte llc2_out[] = {1,2,4,6,2,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
-	const byte llc2_in[]  = {1,3,4,6,3,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
-
-	const byte llc3[] = {4,3,2,2,6,6,0};
-	const byte header[] = {0,2,3,3,0,0,0};
-
-	for (i = 0; i < 8; i++) bp_parms[i].length = 0;
-	for (i = 0; i < 6; i++) b2_config_parms[i].length = 0;
-	for (i = 0; i < 5; i++) b3_config_parms[i].length = 0;
-
-	lli[0] = 1;
-	lli[1] = 1;
-	if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
-		lli[1] |= 2;
-	if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
-		lli[1] |= 4;
-
-	if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
-		lli[1] |= 0x10;
-		if (plci->rx_dma_descriptor <= 0) {
-			plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
-			if (plci->rx_dma_descriptor >= 0)
-				plci->rx_dma_descriptor++;
-		}
-		if (plci->rx_dma_descriptor > 0) {
-			lli[0] = 6;
-			lli[1] |= 0x40;
-			lli[2] = (byte)(plci->rx_dma_descriptor - 1);
-			lli[3] = (byte)plci->rx_dma_magic;
-			lli[4] = (byte)(plci->rx_dma_magic >>  8);
-			lli[5] = (byte)(plci->rx_dma_magic >> 16);
-			lli[6] = (byte)(plci->rx_dma_magic >> 24);
-		}
-	}
-
-	if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
-		lli[1] |= 0x20;
-	}
-
-	dbug(1, dprintf("add_b23"));
-	api_save_msg(bp, "s", &plci->B_protocol);
-
-	if (!bp->length && plci->tel)
-	{
-		plci->adv_nl = true;
-		dbug(1, dprintf("Default adv.Nl"));
-		add_p(plci, LLI, lli);
-		plci->B2_prot = 1 /*XPARENT*/;
-		plci->B3_prot = 0 /*XPARENT*/;
-		llc[1] = 2;
-		llc[2] = 4;
-		add_p(plci, LLC, llc);
-		dlc[0] = 2;
-		PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
-		add_p(plci, DLC, dlc);
-		return 0;
-	}
-
-	if (!bp->length) /*default*/
-	{
-		dbug(1, dprintf("ret default"));
-		add_p(plci, LLI, lli);
-		plci->B2_prot = 0 /*X.75   */;
-		plci->B3_prot = 0 /*XPARENT*/;
-		llc[1] = 1;
-		llc[2] = 4;
-		add_p(plci, LLC, llc);
-		dlc[0] = 2;
-		PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
-		add_p(plci, DLC, dlc);
-		return 0;
-	}
-	dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
-	if ((word)bp->length > 256)    return _WRONG_MESSAGE_FORMAT;
-
-	if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
-	{
-		bp_parms[6].length = 0;
-		if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
-		{
-			dbug(1, dprintf("b-form.!"));
-			return _WRONG_MESSAGE_FORMAT;
-		}
-	}
-	else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
-	{
-		dbug(1, dprintf("b-form.!"));
-		return _WRONG_MESSAGE_FORMAT;
-	}
-
-	if (plci->tel == ADV_VOICE) /* transparent B on advanced voice */
-	{
-		if (GET_WORD(bp_parms[1].info) != 1
-		    || GET_WORD(bp_parms[2].info) != 0) return _B2_NOT_SUPPORTED;
-		plci->adv_nl = true;
-	}
-	else if (plci->tel) return _B2_NOT_SUPPORTED;
-
-
-	if ((GET_WORD(bp_parms[1].info) == B2_RTP)
-	    && (GET_WORD(bp_parms[2].info) == B3_RTP)
-	    && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
-	{
-		add_p(plci, LLI, lli);
-		plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
-		plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
-		llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? 14 : 13;
-		llc[2] = 4;
-		add_p(plci, LLC, llc);
-		dlc[0] = 2;
-		PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
-		dlc[3] = 3; /* Addr A */
-		dlc[4] = 1; /* Addr B */
-		dlc[5] = 7; /* modulo mode */
-		dlc[6] = 7; /* window size */
-		dlc[7] = 0; /* XID len Lo  */
-		dlc[8] = 0; /* XID len Hi  */
-		for (i = 0; i < bp_parms[4].length; i++)
-			dlc[9 + i] = bp_parms[4].info[1 + i];
-		dlc[0] = (byte)(8 + bp_parms[4].length);
-		add_p(plci, DLC, dlc);
-		for (i = 0; i < bp_parms[5].length; i++)
-			nlc[1 + i] = bp_parms[5].info[1 + i];
-		nlc[0] = (byte)(bp_parms[5].length);
-		add_p(plci, NLC, nlc);
-		return 0;
-	}
-
-
-
-	if ((GET_WORD(bp_parms[1].info) >= 32)
-	    || (!((1L << GET_WORD(bp_parms[1].info)) & plci->adapter->profile.B2_Protocols)
-		&& ((GET_WORD(bp_parms[1].info) != B2_PIAFS)
-		    || !(plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))))
-
-	{
-		return _B2_NOT_SUPPORTED;
-	}
-	if ((GET_WORD(bp_parms[2].info) >= 32)
-	    || !((1L << GET_WORD(bp_parms[2].info)) & plci->adapter->profile.B3_Protocols))
-	{
-		return _B3_NOT_SUPPORTED;
-	}
-	if ((GET_WORD(bp_parms[1].info) != B2_SDLC)
-	    && ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
-		|| (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
-		|| (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC)))
-	{
-		return (add_modem_b23(plci, bp_parms));
-	}
-
-	add_p(plci, LLI, lli);
-
-	plci->B2_prot = (byte)GET_WORD(bp_parms[1].info);
-	plci->B3_prot = (byte)GET_WORD(bp_parms[2].info);
-	if (plci->B2_prot == 12) SAPI = 0; /* default SAPI D-channel */
-
-	if (bp_parms[6].length)
-	{
-		if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
-		{
-			return _WRONG_MESSAGE_FORMAT;
-		}
-		switch (GET_WORD(global_config[0].info))
-		{
-		case 1:
-			plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
-			break;
-		case 2:
-			plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
-			break;
-		}
-	}
-	dbug(1, dprintf("call_dir=%04x", plci->call_dir));
-
-
-	if (plci->B2_prot == B2_PIAFS)
-		llc[1] = PIAFS_CRC;
-	else
-/* IMPLEMENT_PIAFS */
-	{
-		llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
-			llc2_out[GET_WORD(bp_parms[1].info)] : llc2_in[GET_WORD(bp_parms[1].info)];
-	}
-	llc[2] = llc3[GET_WORD(bp_parms[2].info)];
-
-	add_p(plci, LLC, llc);
-
-	dlc[0] = 2;
-	PUT_WORD(&dlc[1], plci->appl->MaxDataLength +
-		 header[GET_WORD(bp_parms[2].info)]);
-
-	b1_config = &bp_parms[3];
-	nlc[0] = 0;
-	if (plci->B3_prot == 4
-	    || plci->B3_prot == 5)
-	{
-		for (i = 0; i < sizeof(T30_INFO); i++) nlc[i] = 0;
-		nlc[0] = sizeof(T30_INFO);
-		if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-			((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI;
-		((T30_INFO *)&nlc[1])->rate_div_2400 = 0xff;
-		if (b1_config->length >= 2)
-		{
-			((T30_INFO *)&nlc[1])->rate_div_2400 = (byte)(GET_WORD(&b1_config->info[1]) / 2400);
-		}
-	}
-	b2_config = &bp_parms[4];
-
-
-	if (llc[1] == PIAFS_CRC)
-	{
-		if (plci->B3_prot != B3_TRANSPARENT)
-		{
-			return _B_STACK_NOT_SUPPORTED;
-		}
-		if (b2_config->length && api_parse(&b2_config->info[1], (word)b2_config->length, "bwww", b2_config_parms)) {
-			return _WRONG_MESSAGE_FORMAT;
-		}
-		PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
-		dlc[3] = 0; /* Addr A */
-		dlc[4] = 0; /* Addr B */
-		dlc[5] = 0; /* modulo mode */
-		dlc[6] = 0; /* window size */
-		if (b2_config->length >= 7) {
-			dlc[7] = 7;
-			dlc[8] = 0;
-			dlc[9] = b2_config_parms[0].info[0]; /* PIAFS protocol Speed configuration */
-			dlc[10] = b2_config_parms[1].info[0]; /* V.42bis P0 */
-			dlc[11] = b2_config_parms[1].info[1]; /* V.42bis P0 */
-			dlc[12] = b2_config_parms[2].info[0]; /* V.42bis P1 */
-			dlc[13] = b2_config_parms[2].info[1]; /* V.42bis P1 */
-			dlc[14] = b2_config_parms[3].info[0]; /* V.42bis P2 */
-			dlc[15] = b2_config_parms[3].info[1]; /* V.42bis P2 */
-			dlc[0] = 15;
-			if (b2_config->length >= 8) { /* PIAFS control abilities */
-				dlc[7] = 10;
-				dlc[16] = 2; /* Length of PIAFS extension */
-				dlc[17] = PIAFS_UDATA_ABILITIES; /* control (UDATA) ability */
-				dlc[18] = b2_config_parms[4].info[0]; /* value */
-				dlc[0] = 18;
-			}
-		}
-		else /* default values, 64K, variable, no compression */
-		{
-			dlc[7] = 7;
-			dlc[8] = 0;
-			dlc[9] = 0x03; /* PIAFS protocol Speed configuration */
-			dlc[10] = 0x03; /* V.42bis P0 */
-			dlc[11] = 0;    /* V.42bis P0 */
-			dlc[12] = 0;    /* V.42bis P1 */
-			dlc[13] = 0;    /* V.42bis P1 */
-			dlc[14] = 0;    /* V.42bis P2 */
-			dlc[15] = 0;    /* V.42bis P2 */
-			dlc[0] = 15;
-		}
-		add_p(plci, DLC, dlc);
-	}
-	else
-
-		if ((llc[1] == V120_L2) || (llc[1] == V120_V42BIS))
-		{
-			if (plci->B3_prot != B3_TRANSPARENT)
-				return _B_STACK_NOT_SUPPORTED;
-
-			dlc[0] = 6;
-			PUT_WORD(&dlc[1], GET_WORD(&dlc[1]) + 2);
-			dlc[3] = 0x08;
-			dlc[4] = 0x01;
-			dlc[5] = 127;
-			dlc[6] = 7;
-			if (b2_config->length != 0)
-			{
-				if ((llc[1] == V120_V42BIS) && api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) {
-					return _WRONG_MESSAGE_FORMAT;
-				}
-				dlc[3] = (byte)((b2_config->info[2] << 3) | ((b2_config->info[1] >> 5) & 0x04));
-				dlc[4] = (byte)((b2_config->info[1] << 1) | 0x01);
-				if (b2_config->info[3] != 128)
-				{
-					dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
-					return _B2_PARM_NOT_SUPPORTED;
-				}
-				dlc[5] = (byte)(b2_config->info[3] - 1);
-				dlc[6] = b2_config->info[4];
-				if (llc[1] == V120_V42BIS) {
-					if (b2_config->length >= 10) {
-						dlc[7] = 6;
-						dlc[8] = 0;
-						dlc[9] = b2_config_parms[4].info[0];
-						dlc[10] = b2_config_parms[4].info[1];
-						dlc[11] = b2_config_parms[5].info[0];
-						dlc[12] = b2_config_parms[5].info[1];
-						dlc[13] = b2_config_parms[6].info[0];
-						dlc[14] = b2_config_parms[6].info[1];
-						dlc[0] = 14;
-						dbug(1, dprintf("b2_config_parms[4].info[0] [1]:  %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
-						dbug(1, dprintf("b2_config_parms[5].info[0] [1]:  %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
-						dbug(1, dprintf("b2_config_parms[6].info[0] [1]:  %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
-					}
-					else {
-						dlc[6] = 14;
-					}
-				}
-			}
-		}
-		else
-		{
-			if (b2_config->length)
-			{
-				dbug(1, dprintf("B2-Config"));
-				if (llc[1] == X75_V42BIS) {
-					if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms))
-					{
-						return _WRONG_MESSAGE_FORMAT;
-					}
-				}
-				else {
-					if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbs", b2_config_parms))
-					{
-						return _WRONG_MESSAGE_FORMAT;
-					}
-				}
-				/* if B2 Protocol is LAPD, b2_config structure is different */
-				if (llc[1] == 6)
-				{
-					dlc[0] = 4;
-					if (b2_config->length >= 1) dlc[2] = b2_config->info[1];      /* TEI */
-					else dlc[2] = 0x01;
-					if ((b2_config->length >= 2) && (plci->B2_prot == 12))
-					{
-						SAPI = b2_config->info[2];    /* SAPI */
-					}
-					dlc[1] = SAPI;
-					if ((b2_config->length >= 3) && (b2_config->info[3] == 128))
-					{
-						dlc[3] = 127;      /* Mode */
-					}
-					else
-					{
-						dlc[3] = 7;        /* Mode */
-					}
-
-					if (b2_config->length >= 4) dlc[4] = b2_config->info[4];      /* Window */
-					else dlc[4] = 1;
-					dbug(1, dprintf("D-dlc[%d]=%x,%x,%x,%x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
-					if (b2_config->length > 5) return _B2_PARM_NOT_SUPPORTED;
-				}
-				else
-				{
-					dlc[0] = (byte)(b2_config_parms[4].length + 6);
-					dlc[3] = b2_config->info[1];
-					dlc[4] = b2_config->info[2];
-					if (b2_config->info[3] != 8 && b2_config->info[3] != 128) {
-						dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
-						return _B2_PARM_NOT_SUPPORTED;
-					}
-
-					dlc[5] = (byte)(b2_config->info[3] - 1);
-					dlc[6] = b2_config->info[4];
-					if (dlc[6] > dlc[5]) {
-						dbug(1, dprintf("2D-dlc= %x %x %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4], dlc[5], dlc[6]));
-						return _B2_PARM_NOT_SUPPORTED;
-					}
-
-					if (llc[1] == X75_V42BIS) {
-						if (b2_config->length >= 10) {
-							dlc[7] = 6;
-							dlc[8] = 0;
-							dlc[9] = b2_config_parms[4].info[0];
-							dlc[10] = b2_config_parms[4].info[1];
-							dlc[11] = b2_config_parms[5].info[0];
-							dlc[12] = b2_config_parms[5].info[1];
-							dlc[13] = b2_config_parms[6].info[0];
-							dlc[14] = b2_config_parms[6].info[1];
-							dlc[0] = 14;
-							dbug(1, dprintf("b2_config_parms[4].info[0] [1]:  %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
-							dbug(1, dprintf("b2_config_parms[5].info[0] [1]:  %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
-							dbug(1, dprintf("b2_config_parms[6].info[0] [1]:  %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
-						}
-						else {
-							dlc[6] = 14;
-						}
-
-					}
-					else {
-						PUT_WORD(&dlc[7], (word)b2_config_parms[4].length);
-						for (i = 0; i < b2_config_parms[4].length; i++)
-							dlc[11 + i] = b2_config_parms[4].info[1 + i];
-					}
-				}
-			}
-		}
-	add_p(plci, DLC, dlc);
-
-	b3_config = &bp_parms[5];
-	if (b3_config->length)
-	{
-		if (plci->B3_prot == 4
-		    || plci->B3_prot == 5)
-		{
-			if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwss", b3_config_parms))
-			{
-				return _WRONG_MESSAGE_FORMAT;
-			}
-			i = GET_WORD((byte *)(b3_config_parms[0].info));
-			((T30_INFO *)&nlc[1])->resolution = (byte)(((i & 0x0001) ||
-								    ((plci->B3_prot == 4) && (((byte)(GET_WORD((byte *)b3_config_parms[1].info))) != 5))) ? T30_RESOLUTION_R8_0770_OR_200 : 0);
-			((T30_INFO *)&nlc[1])->data_format = (byte)(GET_WORD((byte *)b3_config_parms[1].info));
-			fax_control_bits = T30_CONTROL_BIT_ALL_FEATURES;
-			if ((((T30_INFO *)&nlc[1])->rate_div_2400 != 0) && (((T30_INFO *)&nlc[1])->rate_div_2400 <= 6))
-				fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_V34FAX;
-			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
-			{
-
-				if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-				    & (1L << PRIVATE_FAX_PAPER_FORMATS))
-				{
-					((T30_INFO *)&nlc[1])->resolution |= T30_RESOLUTION_R8_1540 |
-						T30_RESOLUTION_R16_1540_OR_400 | T30_RESOLUTION_300_300 |
-						T30_RESOLUTION_INCH_BASED | T30_RESOLUTION_METRIC_BASED;
-				}
-
-				((T30_INFO *)&nlc[1])->recording_properties =
-					T30_RECORDING_WIDTH_ISO_A3 |
-					(T30_RECORDING_LENGTH_UNLIMITED << 2) |
-					(T30_MIN_SCANLINE_TIME_00_00_00 << 4);
-			}
-			if (plci->B3_prot == 5)
-			{
-				if (i & 0x0002) /* Accept incoming fax-polling requests */
-					fax_control_bits |= T30_CONTROL_BIT_ACCEPT_POLLING;
-				if (i & 0x2000) /* Do not use MR compression */
-					fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_2D_CODING;
-				if (i & 0x4000) /* Do not use MMR compression */
-					fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_T6_CODING;
-				if (i & 0x8000) /* Do not use ECM */
-					fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_ECM;
-				if (plci->fax_connect_info_length != 0)
-				{
-					((T30_INFO *)&nlc[1])->resolution = ((T30_INFO *)plci->fax_connect_info_buffer)->resolution;
-					((T30_INFO *)&nlc[1])->data_format = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
-					((T30_INFO *)&nlc[1])->recording_properties = ((T30_INFO *)plci->fax_connect_info_buffer)->recording_properties;
-					fax_control_bits |= GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) &
-						(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
-				}
-			}
-			/* copy station id to NLC */
-			for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
-			{
-				if (i < b3_config_parms[2].length)
-				{
-					((T30_INFO *)&nlc[1])->station_id[i] = ((byte *)b3_config_parms[2].info)[1 + i];
-				}
-				else
-				{
-					((T30_INFO *)&nlc[1])->station_id[i] = ' ';
-				}
-			}
-			((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH;
-			/* copy head line to NLC */
-			if (b3_config_parms[3].length)
-			{
-
-				pos = (byte)(fax_head_line_time(&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH])));
-				if (pos != 0)
-				{
-					if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE)
-						pos = 0;
-					else
-					{
-						nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
-						nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
-						len = (byte)b3_config_parms[2].length;
-						if (len > 20)
-							len = 20;
-						if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE)
-						{
-							for (i = 0; i < len; i++)
-								nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1 + i];
-							nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
-							nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
-						}
-					}
-				}
-
-				len = (byte)b3_config_parms[3].length;
-				if (len > CAPI_MAX_HEAD_LINE_SPACE - pos)
-					len = (byte)(CAPI_MAX_HEAD_LINE_SPACE - pos);
-				((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len);
-				nlc[0] += (byte)(pos + len);
-				for (i = 0; i < len; i++)
-					nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] =  ((byte *)b3_config_parms[3].info)[1 + i];
-			} else
-				((T30_INFO *)&nlc[1])->head_line_len = 0;
-
-			plci->nsf_control_bits = 0;
-			if (plci->B3_prot == 5)
-			{
-				if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
-				    && (GET_WORD((byte *)b3_config_parms[1].info) & 0x8000)) /* Private SUB/SEP/PWD enable */
-				{
-					plci->requested_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
-				}
-				if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
-				    && (GET_WORD((byte *)b3_config_parms[1].info) & 0x4000)) /* Private non-standard facilities enable */
-				{
-					plci->requested_options |= 1L << PRIVATE_FAX_NONSTANDARD;
-				}
-				if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-				    & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
-				{
-					if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-					    & (1L << PRIVATE_FAX_SUB_SEP_PWD))
-					{
-						fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
-						if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
-							fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
-					}
-					len = nlc[0];
-					pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
-					if (pos < plci->fax_connect_info_length)
-					{
-						for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
-							nlc[++len] = plci->fax_connect_info_buffer[pos++];
-					}
-					else
-						nlc[++len] = 0;
-					if (pos < plci->fax_connect_info_length)
-					{
-						for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
-							nlc[++len] = plci->fax_connect_info_buffer[pos++];
-					}
-					else
-						nlc[++len] = 0;
-					if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
-					    & (1L << PRIVATE_FAX_NONSTANDARD))
-					{
-						if ((pos < plci->fax_connect_info_length) && (plci->fax_connect_info_buffer[pos] != 0))
-						{
-							if ((plci->fax_connect_info_buffer[pos] >= 3) && (plci->fax_connect_info_buffer[pos + 1] >= 2))
-								plci->nsf_control_bits = GET_WORD(&plci->fax_connect_info_buffer[pos + 2]);
-							for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
-								nlc[++len] = plci->fax_connect_info_buffer[pos++];
-						}
-						else
-						{
-							if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwsss", b3_config_parms))
-							{
-								dbug(1, dprintf("non-standard facilities info missing or wrong format"));
-								nlc[++len] = 0;
-							}
-							else
-							{
-								if ((b3_config_parms[4].length >= 3) && (b3_config_parms[4].info[1] >= 2))
-									plci->nsf_control_bits = GET_WORD(&b3_config_parms[4].info[2]);
-								nlc[++len] = (byte)(b3_config_parms[4].length);
-								for (i = 0; i < b3_config_parms[4].length; i++)
-									nlc[++len] = b3_config_parms[4].info[1 + i];
-							}
-						}
-					}
-					nlc[0] = len;
-					if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
-					    && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
-					{
-						((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI_NEG;
-					}
-				}
-			}
-
-			PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
-			len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
-			for (i = 0; i < len; i++)
-				plci->fax_connect_info_buffer[i] = nlc[1 + i];
-			((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
-			i += ((T30_INFO *)&nlc[1])->head_line_len;
-			while (i < nlc[0])
-				plci->fax_connect_info_buffer[len++] = nlc[++i];
-			plci->fax_connect_info_length = len;
-		}
-		else
-		{
-			nlc[0] = 14;
-			if (b3_config->length != 16)
-				return _B3_PARM_NOT_SUPPORTED;
-			for (i = 0; i < 12; i++) nlc[1 + i] = b3_config->info[1 + i];
-			if (GET_WORD(&b3_config->info[13]) != 8 && GET_WORD(&b3_config->info[13]) != 128)
-				return _B3_PARM_NOT_SUPPORTED;
-			nlc[13] = b3_config->info[13];
-			if (GET_WORD(&b3_config->info[15]) >= nlc[13])
-				return _B3_PARM_NOT_SUPPORTED;
-			nlc[14] = b3_config->info[15];
-		}
-	}
-	else
-	{
-		if (plci->B3_prot == 4
-		    || plci->B3_prot == 5 /*T.30 - FAX*/) return _B3_PARM_NOT_SUPPORTED;
-	}
-	add_p(plci, NLC, nlc);
-	return 0;
-}
-
-/*----------------------------------------------------------------*/
-/*      does the same as add_b23, but only for the modem-related  */
-/*      L2 and L3 B-channel protocols.                            */
-/*                                                                */
-/*      Enabled L2 and L3 Configurations:                         */
-/*        If L1 == Modem all negotiation                          */
-/*          only L2 == Modem with full negotiation is allowed     */
-/*        If L1 == Modem async or sync                            */
-/*          only L2 == Transparent is allowed                     */
-/*        L3 == Modem or L3 == Transparent are allowed            */
-/*      B2 Configuration for modem:                               */
-/*          word : enable/disable compression, bitoptions         */
-/*      B3 Configuration for modem:                               */
-/*          empty                                                 */
-/*----------------------------------------------------------------*/
-static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms)
-{
-	static byte lli[12] = {1,1};
-	static byte llc[3] = {2,0,0};
-	static byte dlc[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-	API_PARSE mdm_config[2];
-	word i;
-	word b2_config = 0;
-
-	for (i = 0; i < 2; i++) mdm_config[i].length = 0;
-	for (i = 0; i < sizeof(dlc); i++) dlc[i] = 0;
-
-	if (((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
-	     && (GET_WORD(bp_parms[1].info) != B2_MODEM_EC_COMPRESSION))
-	    || ((GET_WORD(bp_parms[0].info) != B1_MODEM_ALL_NEGOTIATE)
-		&& (GET_WORD(bp_parms[1].info) != B2_TRANSPARENT)))
-	{
-		return (_B_STACK_NOT_SUPPORTED);
-	}
-	if ((GET_WORD(bp_parms[2].info) != B3_MODEM)
-	    && (GET_WORD(bp_parms[2].info) != B3_TRANSPARENT))
-	{
-		return (_B_STACK_NOT_SUPPORTED);
-	}
-
-	plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
-	plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
-
-	if ((GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) && bp_parms[4].length)
-	{
-		if (api_parse(&bp_parms[4].info[1],
-			      (word)bp_parms[4].length, "w",
-			      mdm_config))
-		{
-			return (_WRONG_MESSAGE_FORMAT);
-		}
-		b2_config = GET_WORD(mdm_config[0].info);
-	}
-
-	/* OK, L2 is modem */
-
-	lli[0] = 1;
-	lli[1] = 1;
-	if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
-		lli[1] |= 2;
-	if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
-		lli[1] |= 4;
-
-	if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
-		lli[1] |= 0x10;
-		if (plci->rx_dma_descriptor <= 0) {
-			plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
-			if (plci->rx_dma_descriptor >= 0)
-				plci->rx_dma_descriptor++;
-		}
-		if (plci->rx_dma_descriptor > 0) {
-			lli[1] |= 0x40;
-			lli[0] = 6;
-			lli[2] = (byte)(plci->rx_dma_descriptor - 1);
-			lli[3] = (byte)plci->rx_dma_magic;
-			lli[4] = (byte)(plci->rx_dma_magic >>  8);
-			lli[5] = (byte)(plci->rx_dma_magic >> 16);
-			lli[6] = (byte)(plci->rx_dma_magic >> 24);
-		}
-	}
-
-	if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
-		lli[1] |= 0x20;
-	}
-
-	llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
-		/*V42*/ 10 : /*V42_IN*/ 9;
-	llc[2] = 4;                      /* pass L3 always transparent */
-	add_p(plci, LLI, lli);
-	add_p(plci, LLC, llc);
-	i =  1;
-	PUT_WORD(&dlc[i], plci->appl->MaxDataLength);
-	i += 2;
-	if (GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION)
-	{
-		if (bp_parms[4].length)
-		{
-			dbug(1, dprintf("MDM b2_config=%02x", b2_config));
-			dlc[i++] = 3; /* Addr A */
-			dlc[i++] = 1; /* Addr B */
-			dlc[i++] = 7; /* modulo mode */
-			dlc[i++] = 7; /* window size */
-			dlc[i++] = 0; /* XID len Lo  */
-			dlc[i++] = 0; /* XID len Hi  */
-
-			if (b2_config & MDM_B2_DISABLE_V42bis)
-			{
-				dlc[i] |= DLC_MODEMPROT_DISABLE_V42_V42BIS;
-			}
-			if (b2_config & MDM_B2_DISABLE_MNP)
-			{
-				dlc[i] |= DLC_MODEMPROT_DISABLE_MNP_MNP5;
-			}
-			if (b2_config & MDM_B2_DISABLE_TRANS)
-			{
-				dlc[i] |= DLC_MODEMPROT_REQUIRE_PROTOCOL;
-			}
-			if (b2_config & MDM_B2_DISABLE_V42)
-			{
-				dlc[i] |= DLC_MODEMPROT_DISABLE_V42_DETECT;
-			}
-			if (b2_config & MDM_B2_DISABLE_COMP)
-			{
-				dlc[i] |= DLC_MODEMPROT_DISABLE_COMPRESSION;
-			}
-			i++;
-		}
-	}
-	else
-	{
-		dlc[i++] = 3; /* Addr A */
-		dlc[i++] = 1; /* Addr B */
-		dlc[i++] = 7; /* modulo mode */
-		dlc[i++] = 7; /* window size */
-		dlc[i++] = 0; /* XID len Lo  */
-		dlc[i++] = 0; /* XID len Hi  */
-		dlc[i++] = DLC_MODEMPROT_DISABLE_V42_V42BIS |
-			DLC_MODEMPROT_DISABLE_MNP_MNP5 |
-			DLC_MODEMPROT_DISABLE_V42_DETECT |
-			DLC_MODEMPROT_DISABLE_COMPRESSION;
-	}
-	dlc[0] = (byte)(i - 1);
-/* HexDump ("DLC", sizeof(dlc), &dlc[0]); */
-	add_p(plci, DLC, dlc);
-	return (0);
-}
-
-
-/*------------------------------------------------------------------*/
-/* send a request for the signaling entity                          */
-/*------------------------------------------------------------------*/
-
-static void sig_req(PLCI *plci, byte req, byte Id)
-{
-	if (!plci) return;
-	if (plci->adapter->adapter_disabled) return;
-	dbug(1, dprintf("sig_req(%x)", req));
-	if (req == REMOVE)
-		plci->sig_remove_id = plci->Sig.Id;
-	if (plci->req_in == plci->req_in_start) {
-		plci->req_in += 2;
-		plci->RBuffer[plci->req_in++] = 0;
-	}
-	PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
-	plci->RBuffer[plci->req_in++] = Id;   /* sig/nl flag */
-	plci->RBuffer[plci->req_in++] = req;  /* request */
-	plci->RBuffer[plci->req_in++] = 0;    /* channel */
-	plci->req_in_start = plci->req_in;
-}
-
-/*------------------------------------------------------------------*/
-/* send a request for the network layer entity                      */
-/*------------------------------------------------------------------*/
-
-static void nl_req_ncci(PLCI *plci, byte req, byte ncci)
-{
-	if (!plci) return;
-	if (plci->adapter->adapter_disabled) return;
-	dbug(1, dprintf("nl_req %02x %02x %02x", plci->Id, req, ncci));
-	if (req == REMOVE)
-	{
-		plci->nl_remove_id = plci->NL.Id;
-		ncci_remove(plci, 0, (byte)(ncci != 0));
-		ncci = 0;
-	}
-	if (plci->req_in == plci->req_in_start) {
-		plci->req_in += 2;
-		plci->RBuffer[plci->req_in++] = 0;
-	}
-	PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
-	plci->RBuffer[plci->req_in++] = 1;    /* sig/nl flag */
-	plci->RBuffer[plci->req_in++] = req;  /* request */
-	plci->RBuffer[plci->req_in++] = plci->adapter->ncci_ch[ncci];   /* channel */
-	plci->req_in_start = plci->req_in;
-}
-
-static void send_req(PLCI *plci)
-{
-	ENTITY *e;
-	word l;
-/*  word i; */
-
-	if (!plci) return;
-	if (plci->adapter->adapter_disabled) return;
-	channel_xmit_xon(plci);
-
-	/* if nothing to do, return */
-	if (plci->req_in == plci->req_out) return;
-	dbug(1, dprintf("send_req(in=%d,out=%d)", plci->req_in, plci->req_out));
-
-	if (plci->nl_req || plci->sig_req) return;
-
-	l = GET_WORD(&plci->RBuffer[plci->req_out]);
-	plci->req_out += 2;
-	plci->XData[0].P = &plci->RBuffer[plci->req_out];
-	plci->req_out += l;
-	if (plci->RBuffer[plci->req_out] == 1)
-	{
-		e = &plci->NL;
-		plci->req_out++;
-		e->Req = plci->nl_req = plci->RBuffer[plci->req_out++];
-		e->ReqCh = plci->RBuffer[plci->req_out++];
-		if (!(e->Id & 0x1f))
-		{
-			e->Id = NL_ID;
-			plci->RBuffer[plci->req_out - 4] = CAI;
-			plci->RBuffer[plci->req_out - 3] = 1;
-			plci->RBuffer[plci->req_out - 2] = (plci->Sig.Id == 0xff) ? 0 : plci->Sig.Id;
-			plci->RBuffer[plci->req_out - 1] = 0;
-			l += 3;
-			plci->nl_global_req = plci->nl_req;
-		}
-		dbug(1, dprintf("%x:NLREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
-	}
-	else
-	{
-		e = &plci->Sig;
-		if (plci->RBuffer[plci->req_out])
-			e->Id = plci->RBuffer[plci->req_out];
-		plci->req_out++;
-		e->Req = plci->sig_req = plci->RBuffer[plci->req_out++];
-		e->ReqCh = plci->RBuffer[plci->req_out++];
-		if (!(e->Id & 0x1f))
-			plci->sig_global_req = plci->sig_req;
-		dbug(1, dprintf("%x:SIGREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
-	}
-	plci->XData[0].PLength = l;
-	e->X = plci->XData;
-	plci->adapter->request(e);
-	dbug(1, dprintf("send_ok"));
-}
-
-static void send_data(PLCI *plci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	DATA_B3_DESC *data;
-	NCCI   *ncci_ptr;
-	word ncci;
-
-	if (!plci->nl_req && plci->ncci_ring_list)
-	{
-		a = plci->adapter;
-		ncci = plci->ncci_ring_list;
-		do
-		{
-			ncci = a->ncci_next[ncci];
-			ncci_ptr = &(a->ncci[ncci]);
-			if (!(a->ncci_ch[ncci]
-			      && (a->ch_flow_control[a->ncci_ch[ncci]] & N_OK_FC_PENDING)))
-			{
-				if (ncci_ptr->data_pending)
-				{
-					if ((a->ncci_state[ncci] == CONNECTED)
-					    || (a->ncci_state[ncci] == INC_ACT_PENDING)
-					    || (plci->send_disc == ncci))
-					{
-						data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
-						if ((plci->B2_prot == B2_V120_ASYNC)
-						    || (plci->B2_prot == B2_V120_ASYNC_V42BIS)
-						    || (plci->B2_prot == B2_V120_BIT_TRANSPARENT))
-						{
-							plci->NData[1].P = TransmitBufferGet(plci->appl, data->P);
-							plci->NData[1].PLength = data->Length;
-							if (data->Flags & 0x10)
-								plci->NData[0].P = v120_break_header;
-							else
-								plci->NData[0].P = v120_default_header;
-							plci->NData[0].PLength = 1;
-							plci->NL.XNum = 2;
-							plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
-						}
-						else
-						{
-							plci->NData[0].P = TransmitBufferGet(plci->appl, data->P);
-							plci->NData[0].PLength = data->Length;
-							if (data->Flags & 0x10)
-								plci->NL.Req = plci->nl_req = (byte)N_UDATA;
-
-							else if ((plci->B3_prot == B3_RTP) && (data->Flags & 0x01))
-								plci->NL.Req = plci->nl_req = (byte)N_BDATA;
-
-							else
-								plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
-						}
-						plci->NL.X = plci->NData;
-						plci->NL.ReqCh = a->ncci_ch[ncci];
-						dbug(1, dprintf("%x:DREQ(%x:%x)", a->Id, plci->NL.Id, plci->NL.Req));
-						plci->data_sent = true;
-						plci->data_sent_ptr = data->P;
-						a->request(&plci->NL);
-					}
-					else {
-						cleanup_ncci_data(plci, ncci);
-					}
-				}
-				else if (plci->send_disc == ncci)
-				{
-					/* dprintf("N_DISC"); */
-					plci->NData[0].PLength = 0;
-					plci->NL.ReqCh = a->ncci_ch[ncci];
-					plci->NL.Req = plci->nl_req = N_DISC;
-					a->request(&plci->NL);
-					plci->command = _DISCONNECT_B3_R;
-					plci->send_disc = 0;
-				}
-			}
-		} while (!plci->nl_req && (ncci != plci->ncci_ring_list));
-		plci->ncci_ring_list = ncci;
-	}
-}
-
-static void listen_check(DIVA_CAPI_ADAPTER *a)
-{
-	word i, j;
-	PLCI *plci;
-	byte activnotifiedcalls = 0;
-
-	dbug(1, dprintf("listen_check(%d,%d)", a->listen_active, a->max_listen));
-	if (!remove_started && !a->adapter_disabled)
-	{
-		for (i = 0; i < a->max_plci; i++)
-		{
-			plci = &(a->plci[i]);
-			if (plci->notifiedcall) activnotifiedcalls++;
-		}
-		dbug(1, dprintf("listen_check(%d)", activnotifiedcalls));
-
-		for (i = a->listen_active; i < ((word)(a->max_listen + activnotifiedcalls)); i++) {
-			if ((j = get_plci(a))) {
-				a->listen_active++;
-				plci = &a->plci[j - 1];
-				plci->State = LISTENING;
-
-				add_p(plci, OAD, "\x01\xfd");
-
-				add_p(plci, KEY, "\x04\x43\x41\x32\x30");
-
-				add_p(plci, CAI, "\x01\xc0");
-				add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-				add_p(plci, LLI, "\x01\xc4");                  /* support Dummy CR FAC + MWI + SpoofNotify */
-				add_p(plci, SHIFT | 6, NULL);
-				add_p(plci, SIN, "\x02\x00\x00");
-				plci->internal_command = LISTEN_SIG_ASSIGN_PEND;     /* do indicate_req if OK  */
-				sig_req(plci, ASSIGN, DSIG_ID);
-				send_req(plci);
-			}
-		}
-	}
-}
-
-/*------------------------------------------------------------------*/
-/* functions for all parameters sent in INDs                        */
-/*------------------------------------------------------------------*/
-
-static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
-{
-	word ploc;            /* points to current location within packet */
-	byte w;
-	byte wlen;
-	byte codeset, lock;
-	byte *in;
-	word i;
-	word code;
-	word mIEindex = 0;
-	ploc = 0;
-	codeset = 0;
-	lock = 0;
-
-	in = plci->Sig.RBuffer->P;
-	for (i = 0; i < parms_id[0]; i++)   /* multiIE parms_id contains just the 1st */
-	{                            /* element but parms array is larger      */
-		parms[i] = (byte *)"";
-	}
-	for (i = 0; i < multiIEsize; i++)
-	{
-		parms[i] = (byte *)"";
-	}
-
-	while (ploc < plci->Sig.RBuffer->length - 1) {
-
-		/* read information element id and length                   */
-		w = in[ploc];
-
-		if (w & 0x80) {
-/*    w &=0xf0; removed, cannot detect congestion levels */
-/*    upper 4 bit masked with w==SHIFT now               */
-			wlen = 0;
-		}
-		else {
-			wlen = (byte)(in[ploc + 1] + 1);
-		}
-		/* check if length valid (not exceeding end of packet)      */
-		if ((ploc + wlen) > 270) return;
-		if (lock & 0x80) lock &= 0x7f;
-		else codeset = lock;
-
-		if ((w & 0xf0) == SHIFT) {
-			codeset = in[ploc];
-			if (!(codeset & 0x08)) lock = (byte)(codeset & 7);
-			codeset &= 7;
-			lock |= 0x80;
-		}
-		else {
-			if (w == ESC && wlen >= 3) code = in[ploc + 2] | 0x800;
-			else code = w;
-			code |= (codeset << 8);
-
-			for (i = 1; i < parms_id[0] + 1 && parms_id[i] != code; i++);
-
-			if (i < parms_id[0] + 1) {
-				if (!multiIEsize) { /* with multiIEs use next field index,          */
-					mIEindex = i - 1;    /* with normal IEs use same index like parms_id */
-				}
-
-				parms[mIEindex] = &in[ploc + 1];
-				dbug(1, dprintf("mIE[%d]=0x%x", *parms[mIEindex], in[ploc]));
-				if (parms_id[i] == OAD
-				    || parms_id[i] == CONN_NR
-				    || parms_id[i] == CAD) {
-					if (in[ploc + 2] & 0x80) {
-						in[ploc + 0] = (byte)(in[ploc + 1] + 1);
-						in[ploc + 1] = (byte)(in[ploc + 2] & 0x7f);
-						in[ploc + 2] = 0x80;
-						parms[mIEindex] = &in[ploc];
-					}
-				}
-				mIEindex++;       /* effects multiIEs only */
-			}
-		}
-
-		ploc += (wlen + 1);
-	}
-	return;
-}
-
-/*------------------------------------------------------------------*/
-/* try to match a cip from received BC and HLC                      */
-/*------------------------------------------------------------------*/
-
-static byte ie_compare(byte *ie1, byte *ie2)
-{
-	word i;
-	if (!ie1 || !ie2) return false;
-	if (!ie1[0]) return false;
-	for (i = 0; i < (word)(ie1[0] + 1); i++) if (ie1[i] != ie2[i]) return false;
-	return true;
-}
-
-static word find_cip(DIVA_CAPI_ADAPTER *a, byte *bc, byte *hlc)
-{
-	word i;
-	word j;
-
-	for (i = 9; i && !ie_compare(bc, cip_bc[i][a->u_law]); i--);
-
-	for (j = 16; j < 29 &&
-		     (!ie_compare(bc, cip_bc[j][a->u_law]) || !ie_compare(hlc, cip_hlc[j])); j++);
-	if (j == 29) return i;
-	return j;
-}
-
-
-static byte AddInfo(byte **add_i,
-		    byte **fty_i,
-		    byte *esc_chi,
-		    byte *facility)
-{
-	byte i;
-	byte j;
-	byte k;
-	byte flen;
-	byte len = 0;
-	/* facility is a nested structure */
-	/* FTY can be more than once      */
-
-	if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
-	{
-		add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
-	}
-
-	else
-	{
-		add_i[0] = (byte *)"";
-	}
-	if (!fty_i[0][0])
-	{
-		add_i[3] = (byte *)"";
-	}
-	else
-	{    /* facility array found  */
-		for (i = 0, j = 1; i < MAX_MULTI_IE && fty_i[i][0]; i++)
-		{
-			dbug(1, dprintf("AddIFac[%d]", fty_i[i][0]));
-			len += fty_i[i][0];
-			len += 2;
-			flen = fty_i[i][0];
-			facility[j++] = 0x1c; /* copy fac IE */
-			for (k = 0; k <= flen; k++, j++)
-			{
-				facility[j] = fty_i[i][k];
-/*      dbug(1, dprintf("%x ",facility[j])); */
-			}
-		}
-		facility[0] = len;
-		add_i[3] = facility;
-	}
-/*  dbug(1, dprintf("FacArrLen=%d ",len)); */
-	len = add_i[0][0] + add_i[1][0] + add_i[2][0] + add_i[3][0];
-	len += 4;                          /* calculate length of all */
-	return (len);
-}
-
-/*------------------------------------------------------------------*/
-/* voice and codec features                                         */
-/*------------------------------------------------------------------*/
-
-static void SetVoiceChannel(PLCI *plci, byte *chi, DIVA_CAPI_ADAPTER *a)
-{
-	byte voice_chi[] = "\x02\x18\x01";
-	byte channel;
-
-	channel = chi[chi[0]] & 0x3;
-	dbug(1, dprintf("ExtDevON(Ch=0x%x)", channel));
-	voice_chi[2] = (channel) ? channel : 1;
-	add_p(plci, FTY, "\x02\x01\x07");             /* B On, default on 1 */
-	add_p(plci, ESC, voice_chi);                  /* Channel */
-	sig_req(plci, TEL_CTRL, 0);
-	send_req(plci);
-	if (a->AdvSignalPLCI)
-	{
-		adv_voice_write_coefs(a->AdvSignalPLCI, ADV_VOICE_WRITE_ACTIVATION);
-	}
-}
-
-static void VoiceChannelOff(PLCI *plci)
-{
-	dbug(1, dprintf("ExtDevOFF"));
-	add_p(plci, FTY, "\x02\x01\x08");             /* B Off */
-	sig_req(plci, TEL_CTRL, 0);
-	send_req(plci);
-	if (plci->adapter->AdvSignalPLCI)
-	{
-		adv_voice_clear_config(plci->adapter->AdvSignalPLCI);
-	}
-}
-
-
-static word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl,
-			    byte hook_listen)
-{
-	word j;
-	PLCI *splci;
-
-	/* check if hardware supports handset with hook states (adv.codec) */
-	/* or if just an on-board codec is supported                       */
-	/* the advanced codec plci is just for internal use                */
-
-	/* diva Pro with on-board codec:                                   */
-	if (a->profile.Global_Options & HANDSET)
-	{
-		/* new call, but hook states are already signalled */
-		if (a->AdvCodecFLAG)
-		{
-			if (a->AdvSignalAppl != appl || a->AdvSignalPLCI)
-			{
-				dbug(1, dprintf("AdvSigPlci=0x%x", a->AdvSignalPLCI));
-				return 0x2001; /* codec in use by another application */
-			}
-			if (plci != NULL)
-			{
-				a->AdvSignalPLCI = plci;
-				plci->tel = ADV_VOICE;
-			}
-			return 0;                      /* adv codec still used */
-		}
-		if ((j = get_plci(a)))
-		{
-			splci = &a->plci[j - 1];
-			splci->tel = CODEC_PERMANENT;
-			/* hook_listen indicates if a facility_req with handset/hook support */
-			/* was sent. Otherwise, if just a call on an external device was made, */
-			/* the codec will be used but the hook info will be discarded (just */
-			/* the external controller is in use)                                */
-			if (hook_listen) splci->State = ADVANCED_VOICE_SIG;
-			else
-			{
-				splci->State = ADVANCED_VOICE_NOSIG;
-				if (plci)
-				{
-					plci->spoofed_msg = SPOOFING_REQUIRED;
-				}
-				/* indicate D-ch connect if  */
-			}                                        /* codec is connected OK     */
-			if (plci != NULL)
-			{
-				a->AdvSignalPLCI = plci;
-				plci->tel = ADV_VOICE;
-			}
-			a->AdvSignalAppl = appl;
-			a->AdvCodecFLAG = true;
-			a->AdvCodecPLCI = splci;
-			add_p(splci, CAI, "\x01\x15");
-			add_p(splci, LLI, "\x01\x00");
-			add_p(splci, ESC, "\x02\x18\x00");
-			add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-			splci->internal_command = PERM_COD_ASSIGN;
-			dbug(1, dprintf("Codec Assign"));
-			sig_req(splci, ASSIGN, DSIG_ID);
-			send_req(splci);
-		}
-		else
-		{
-			return 0x2001; /* wrong state, no more plcis */
-		}
-	}
-	else if (a->profile.Global_Options & ON_BOARD_CODEC)
-	{
-		if (hook_listen) return 0x300B;               /* Facility not supported */
-		/* no hook with SCOM      */
-		if (plci != NULL) plci->tel = CODEC;
-		dbug(1, dprintf("S/SCOM codec"));
-		/* first time we use the scom-s codec we must shut down the internal   */
-		/* handset application of the card. This can be done by an assign with */
-		/* a cai with the 0x80 bit set. Assign return code is 'out of resource'*/
-		if (!a->scom_appl_disable) {
-			if ((j = get_plci(a))) {
-				splci = &a->plci[j - 1];
-				add_p(splci, CAI, "\x01\x80");
-				add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-				sig_req(splci, ASSIGN, 0xC0);  /* 0xc0 is the TEL_ID */
-				send_req(splci);
-				a->scom_appl_disable = true;
-			}
-			else{
-				return 0x2001; /* wrong state, no more plcis */
-			}
-		}
-	}
-	else return 0x300B;               /* Facility not supported */
-
-	return 0;
-}
-
-
-static void CodecIdCheck(DIVA_CAPI_ADAPTER *a, PLCI *plci)
-{
-
-	dbug(1, dprintf("CodecIdCheck"));
-
-	if (a->AdvSignalPLCI == plci)
-	{
-		dbug(1, dprintf("PLCI owns codec"));
-		VoiceChannelOff(a->AdvCodecPLCI);
-		if (a->AdvCodecPLCI->State == ADVANCED_VOICE_NOSIG)
-		{
-			dbug(1, dprintf("remove temp codec PLCI"));
-			plci_remove(a->AdvCodecPLCI);
-			a->AdvCodecFLAG  = 0;
-			a->AdvCodecPLCI  = NULL;
-			a->AdvSignalAppl = NULL;
-		}
-		a->AdvSignalPLCI = NULL;
-	}
-}
-
-/* -------------------------------------------------------------------
-   Ask for physical address of card on PCI bus
-   ------------------------------------------------------------------- */
-static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *a,
-				       IDI_SYNC_REQ *preq) {
-	a->sdram_bar = 0;
-	if (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR) {
-		ENTITY *e = (ENTITY *)preq;
-
-		e->user[0] = a->Id - 1;
-		preq->xdi_sdram_bar.info.bar    = 0;
-		preq->xdi_sdram_bar.Req         = 0;
-		preq->xdi_sdram_bar.Rc           = IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR;
-
-		(*(a->request))(e);
-
-		a->sdram_bar = preq->xdi_sdram_bar.info.bar;
-		dbug(3, dprintf("A(%d) SDRAM BAR = %08x", a->Id, a->sdram_bar));
-	}
-}
-
-/* -------------------------------------------------------------------
-   Ask XDI about extended features
-   ------------------------------------------------------------------- */
-static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a) {
-	IDI_SYNC_REQ *preq;
-	char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
-
-	char features[4];
-	preq = (IDI_SYNC_REQ *)&buffer[0];
-
-	if (!diva_xdi_extended_features) {
-		ENTITY *e = (ENTITY *)preq;
-		diva_xdi_extended_features |= 0x80000000;
-
-		e->user[0] = a->Id - 1;
-		preq->xdi_extended_features.Req = 0;
-		preq->xdi_extended_features.Rc  = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
-		preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
-		preq->xdi_extended_features.info.features = &features[0];
-
-		(*(a->request))(e);
-
-		if (features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) {
-			/*
-			  Check features located in the byte '0'
-			*/
-			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_CMA) {
-				diva_xdi_extended_features |= DIVA_CAPI_USE_CMA;
-			}
-			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA) {
-				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_RX_DMA;
-				dbug(1, dprintf("XDI provides RxDMA"));
-			}
-			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR) {
-				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR;
-			}
-			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC) {
-				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_NO_CANCEL;
-				dbug(3, dprintf("XDI provides NO_CANCEL_RC feature"));
-			}
-
-		}
-	}
-
-	diva_ask_for_xdi_sdram_bar(a, preq);
-}
-
-/*------------------------------------------------------------------*/
-/* automatic law                                                    */
-/*------------------------------------------------------------------*/
-/* called from the OS-specific part after init time to get the law          */
-/* a-law (Euro) and u-law (US, Japan) use different BCs in the Setup message */
-void AutomaticLaw(DIVA_CAPI_ADAPTER *a)
-{
-	word j;
-	PLCI *splci;
-
-	if (a->automatic_law) {
-		return;
-	}
-	if ((j = get_plci(a))) {
-		diva_get_extended_adapter_features(a);
-		splci = &a->plci[j - 1];
-		a->automatic_lawPLCI = splci;
-		a->automatic_law = 1;
-		add_p(splci, CAI, "\x01\x80");
-		add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-		splci->internal_command = USELAW_REQ;
-		splci->command = 0;
-		splci->number = 0;
-		sig_req(splci, ASSIGN, DSIG_ID);
-		send_req(splci);
-	}
-}
-
-/* called from the OS-specific part if an application sends a Capi20Release */
-word CapiRelease(word Id)
-{
-	word i, j, appls_found;
-	PLCI *plci;
-	APPL   *this;
-	DIVA_CAPI_ADAPTER *a;
-
-	if (!Id)
-	{
-		dbug(0, dprintf("A: CapiRelease(Id==0)"));
-		return (_WRONG_APPL_ID);
-	}
-
-	this = &application[Id - 1];               /* get application pointer */
-
-	for (i = 0, appls_found = 0; i < max_appl; i++)
-	{
-		if (application[i].Id)       /* an application has been found        */
-		{
-			appls_found++;
-		}
-	}
-
-	for (i = 0; i < max_adapter; i++)             /* scan all adapters...    */
-	{
-		a = &adapter[i];
-		if (a->request)
-		{
-			a->Info_Mask[Id - 1] = 0;
-			a->CIP_Mask[Id - 1] = 0;
-			a->Notification_Mask[Id - 1] = 0;
-			a->codec_listen[Id - 1] = NULL;
-			a->requested_options_table[Id - 1] = 0;
-			for (j = 0; j < a->max_plci; j++)           /* and all PLCIs connected */
-			{                                      /* with this application   */
-				plci = &a->plci[j];
-				if (plci->Id)                         /* if plci owns no application */
-				{                                    /* it may not yet be connected */
-					if (plci->State == INC_CON_PENDING
-					    || plci->State == INC_CON_ALERT)
-					{
-						if (test_bit(Id - 1, plci->c_ind_mask_table))
-						{
-							__clear_bit(Id - 1, plci->c_ind_mask_table);
-							if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
-							{
-								sig_req(plci, HANGUP, 0);
-								send_req(plci);
-								plci->State = OUTG_DIS_PENDING;
-							}
-						}
-					}
-					if (test_bit(Id - 1, plci->c_ind_mask_table))
-					{
-						__clear_bit(Id - 1, plci->c_ind_mask_table);
-						if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
-						{
-							if (!plci->appl)
-							{
-								plci_remove(plci);
-								plci->State = IDLE;
-							}
-						}
-					}
-					if (plci->appl == this)
-					{
-						plci->appl = NULL;
-						plci_remove(plci);
-						plci->State = IDLE;
-					}
-				}
-			}
-			listen_check(a);
-
-			if (a->flag_dynamic_l1_down)
-			{
-				if (appls_found == 1)            /* last application does a capi release */
-				{
-					if ((j = get_plci(a)))
-					{
-						plci = &a->plci[j - 1];
-						plci->command = 0;
-						add_p(plci, OAD, "\x01\xfd");
-						add_p(plci, CAI, "\x01\x80");
-						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						add_p(plci, SHIFT | 6, NULL);
-						add_p(plci, SIN, "\x02\x00\x00");
-						plci->internal_command = REM_L1_SIG_ASSIGN_PEND;
-						sig_req(plci, ASSIGN, DSIG_ID);
-						add_p(plci, FTY, "\x02\xff\x06"); /* l1 down */
-						sig_req(plci, SIG_CTRL, 0);
-						send_req(plci);
-					}
-				}
-			}
-			if (a->AdvSignalAppl == this)
-			{
-				this->NullCREnable = false;
-				if (a->AdvCodecPLCI)
-				{
-					plci_remove(a->AdvCodecPLCI);
-					a->AdvCodecPLCI->tel = 0;
-					a->AdvCodecPLCI->adv_nl = 0;
-				}
-				a->AdvSignalAppl = NULL;
-				a->AdvSignalPLCI = NULL;
-				a->AdvCodecFLAG = 0;
-				a->AdvCodecPLCI = NULL;
-			}
-		}
-	}
-
-	this->Id = 0;
-
-	return GOOD;
-}
-
-static word plci_remove_check(PLCI *plci)
-{
-	if (!plci) return true;
-	if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
-	{
-		if (plci->Sig.Id == 0xff)
-			plci->Sig.Id = 0;
-		if (!plci->Sig.Id)
-		{
-			dbug(1, dprintf("plci_remove_complete(%x)", plci->Id));
-			dbug(1, dprintf("tel=0x%x,Sig=0x%x", plci->tel, plci->Sig.Id));
-			if (plci->Id)
-			{
-				CodecIdCheck(plci->adapter, plci);
-				clear_b1_config(plci);
-				ncci_remove(plci, 0, false);
-				plci_free_msg_in_queue(plci);
-				channel_flow_control_remove(plci);
-				plci->Id = 0;
-				plci->State = IDLE;
-				plci->channels = 0;
-				plci->appl = NULL;
-				plci->notifiedcall = 0;
-			}
-			listen_check(plci->adapter);
-			return true;
-		}
-	}
-	return false;
-}
-
-
-/*------------------------------------------------------------------*/
-
-static byte plci_nl_busy(PLCI *plci)
-{
-	/* only applicable for non-multiplexed protocols */
-	return (plci->nl_req
-		|| (plci->ncci_ring_list
-		    && plci->adapter->ncci_ch[plci->ncci_ring_list]
-		    && (plci->adapter->ch_flow_control[plci->adapter->ncci_ch[plci->ncci_ring_list]] & N_OK_FC_PENDING)));
-}
-
-
-/*------------------------------------------------------------------*/
-/* DTMF facilities                                                  */
-/*------------------------------------------------------------------*/
-
-
-static struct
-{
-	byte send_mask;
-	byte listen_mask;
-	byte character;
-	byte code;
-} dtmf_digit_map[] =
-{
-	{ 0x01, 0x01, 0x23, DTMF_DIGIT_TONE_CODE_HASHMARK },
-	{ 0x01, 0x01, 0x2a, DTMF_DIGIT_TONE_CODE_STAR },
-	{ 0x01, 0x01, 0x30, DTMF_DIGIT_TONE_CODE_0 },
-	{ 0x01, 0x01, 0x31, DTMF_DIGIT_TONE_CODE_1 },
-	{ 0x01, 0x01, 0x32, DTMF_DIGIT_TONE_CODE_2 },
-	{ 0x01, 0x01, 0x33, DTMF_DIGIT_TONE_CODE_3 },
-	{ 0x01, 0x01, 0x34, DTMF_DIGIT_TONE_CODE_4 },
-	{ 0x01, 0x01, 0x35, DTMF_DIGIT_TONE_CODE_5 },
-	{ 0x01, 0x01, 0x36, DTMF_DIGIT_TONE_CODE_6 },
-	{ 0x01, 0x01, 0x37, DTMF_DIGIT_TONE_CODE_7 },
-	{ 0x01, 0x01, 0x38, DTMF_DIGIT_TONE_CODE_8 },
-	{ 0x01, 0x01, 0x39, DTMF_DIGIT_TONE_CODE_9 },
-	{ 0x01, 0x01, 0x41, DTMF_DIGIT_TONE_CODE_A },
-	{ 0x01, 0x01, 0x42, DTMF_DIGIT_TONE_CODE_B },
-	{ 0x01, 0x01, 0x43, DTMF_DIGIT_TONE_CODE_C },
-	{ 0x01, 0x01, 0x44, DTMF_DIGIT_TONE_CODE_D },
-	{ 0x01, 0x00, 0x61, DTMF_DIGIT_TONE_CODE_A },
-	{ 0x01, 0x00, 0x62, DTMF_DIGIT_TONE_CODE_B },
-	{ 0x01, 0x00, 0x63, DTMF_DIGIT_TONE_CODE_C },
-	{ 0x01, 0x00, 0x64, DTMF_DIGIT_TONE_CODE_D },
-
-	{ 0x04, 0x04, 0x80, DTMF_SIGNAL_NO_TONE },
-	{ 0x00, 0x04, 0x81, DTMF_SIGNAL_UNIDENTIFIED_TONE },
-	{ 0x04, 0x04, 0x82, DTMF_SIGNAL_DIAL_TONE },
-	{ 0x04, 0x04, 0x83, DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE },
-	{ 0x04, 0x04, 0x84, DTMF_SIGNAL_SPECIAL_DIAL_TONE },
-	{ 0x04, 0x04, 0x85, DTMF_SIGNAL_SECOND_DIAL_TONE },
-	{ 0x04, 0x04, 0x86, DTMF_SIGNAL_RINGING_TONE },
-	{ 0x04, 0x04, 0x87, DTMF_SIGNAL_SPECIAL_RINGING_TONE },
-	{ 0x04, 0x04, 0x88, DTMF_SIGNAL_BUSY_TONE },
-	{ 0x04, 0x04, 0x89, DTMF_SIGNAL_CONGESTION_TONE },
-	{ 0x04, 0x04, 0x8a, DTMF_SIGNAL_SPECIAL_INFORMATION_TONE },
-	{ 0x04, 0x04, 0x8b, DTMF_SIGNAL_COMFORT_TONE },
-	{ 0x04, 0x04, 0x8c, DTMF_SIGNAL_HOLD_TONE },
-	{ 0x04, 0x04, 0x8d, DTMF_SIGNAL_RECORD_TONE },
-	{ 0x04, 0x04, 0x8e, DTMF_SIGNAL_CALLER_WAITING_TONE },
-	{ 0x04, 0x04, 0x8f, DTMF_SIGNAL_CALL_WAITING_TONE },
-	{ 0x04, 0x04, 0x90, DTMF_SIGNAL_PAY_TONE },
-	{ 0x04, 0x04, 0x91, DTMF_SIGNAL_POSITIVE_INDICATION_TONE },
-	{ 0x04, 0x04, 0x92, DTMF_SIGNAL_NEGATIVE_INDICATION_TONE },
-	{ 0x04, 0x04, 0x93, DTMF_SIGNAL_WARNING_TONE },
-	{ 0x04, 0x04, 0x94, DTMF_SIGNAL_INTRUSION_TONE },
-	{ 0x04, 0x04, 0x95, DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE },
-	{ 0x04, 0x04, 0x96, DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE },
-	{ 0x04, 0x04, 0x97, DTMF_SIGNAL_CPE_ALERTING_SIGNAL },
-	{ 0x04, 0x04, 0x98, DTMF_SIGNAL_OFF_HOOK_WARNING_TONE },
-	{ 0x04, 0x04, 0xbf, DTMF_SIGNAL_INTERCEPT_TONE },
-	{ 0x04, 0x04, 0xc0, DTMF_SIGNAL_MODEM_CALLING_TONE },
-	{ 0x04, 0x04, 0xc1, DTMF_SIGNAL_FAX_CALLING_TONE },
-	{ 0x04, 0x04, 0xc2, DTMF_SIGNAL_ANSWER_TONE },
-	{ 0x04, 0x04, 0xc3, DTMF_SIGNAL_REVERSED_ANSWER_TONE },
-	{ 0x04, 0x04, 0xc4, DTMF_SIGNAL_ANSAM_TONE },
-	{ 0x04, 0x04, 0xc5, DTMF_SIGNAL_REVERSED_ANSAM_TONE },
-	{ 0x04, 0x04, 0xc6, DTMF_SIGNAL_BELL103_ANSWER_TONE },
-	{ 0x04, 0x04, 0xc7, DTMF_SIGNAL_FAX_FLAGS },
-	{ 0x04, 0x04, 0xc8, DTMF_SIGNAL_G2_FAX_GROUP_ID },
-	{ 0x00, 0x04, 0xc9, DTMF_SIGNAL_HUMAN_SPEECH },
-	{ 0x04, 0x04, 0xca, DTMF_SIGNAL_ANSWERING_MACHINE_390 },
-	{ 0x02, 0x02, 0xf1, DTMF_MF_DIGIT_TONE_CODE_1 },
-	{ 0x02, 0x02, 0xf2, DTMF_MF_DIGIT_TONE_CODE_2 },
-	{ 0x02, 0x02, 0xf3, DTMF_MF_DIGIT_TONE_CODE_3 },
-	{ 0x02, 0x02, 0xf4, DTMF_MF_DIGIT_TONE_CODE_4 },
-	{ 0x02, 0x02, 0xf5, DTMF_MF_DIGIT_TONE_CODE_5 },
-	{ 0x02, 0x02, 0xf6, DTMF_MF_DIGIT_TONE_CODE_6 },
-	{ 0x02, 0x02, 0xf7, DTMF_MF_DIGIT_TONE_CODE_7 },
-	{ 0x02, 0x02, 0xf8, DTMF_MF_DIGIT_TONE_CODE_8 },
-	{ 0x02, 0x02, 0xf9, DTMF_MF_DIGIT_TONE_CODE_9 },
-	{ 0x02, 0x02, 0xfa, DTMF_MF_DIGIT_TONE_CODE_0 },
-	{ 0x02, 0x02, 0xfb, DTMF_MF_DIGIT_TONE_CODE_K1 },
-	{ 0x02, 0x02, 0xfc, DTMF_MF_DIGIT_TONE_CODE_K2 },
-	{ 0x02, 0x02, 0xfd, DTMF_MF_DIGIT_TONE_CODE_KP },
-	{ 0x02, 0x02, 0xfe, DTMF_MF_DIGIT_TONE_CODE_S1 },
-	{ 0x02, 0x02, 0xff, DTMF_MF_DIGIT_TONE_CODE_ST },
-
-};
-
-#define DTMF_DIGIT_MAP_ENTRIES ARRAY_SIZE(dtmf_digit_map)
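/*
 * Editorial note -- illustrative sketch only, not part of the removed
 * driver source: the send_mask/listen_mask bits of dtmf_digit_map[]
 * correspond to the mask built up in dtmf_command()/dtmf_request()
 * below (0x01 = plain DTMF digits, 0x02 = MF digits, 0x04 = recognised
 * signalling tones).  A lookup equivalent to the scan loops used later
 * in this file could be written as follows (the helper name is
 * hypothetical):
 */
static byte example_dtmf_char_to_code(byte character, byte mask)
{
	word i;

	for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++) {
		if ((dtmf_digit_map[i].character == character)
		    && (dtmf_digit_map[i].send_mask & mask))
			return (dtmf_digit_map[i].code);
	}
	return (DTMF_DIGIT_TONE_CODE_STAR);	/* fallback, as in dtmf_send_digits() */
}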
-
-
-static void dtmf_enable_receiver(PLCI *plci, byte enable_mask)
-{
-	word min_digit_duration, min_gap_duration;
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_enable_receiver %02x",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, enable_mask));
-
-	if (enable_mask != 0)
-	{
-		min_digit_duration = (plci->dtmf_rec_pulse_ms == 0) ? 40 : plci->dtmf_rec_pulse_ms;
-		min_gap_duration = (plci->dtmf_rec_pause_ms == 0) ? 40 : plci->dtmf_rec_pause_ms;
-		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_ENABLE_RECEIVER;
-		PUT_WORD(&plci->internal_req_buffer[1], min_digit_duration);
-		PUT_WORD(&plci->internal_req_buffer[3], min_gap_duration);
-		plci->NData[0].PLength = 5;
-
-		PUT_WORD(&plci->internal_req_buffer[5], INTERNAL_IND_BUFFER_SIZE);
-		plci->NData[0].PLength += 2;
-		capidtmf_recv_enable(&(plci->capidtmf_state), min_digit_duration, min_gap_duration);
-
-	}
-	else
-	{
-		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_DISABLE_RECEIVER;
-		plci->NData[0].PLength = 1;
-
-		capidtmf_recv_disable(&(plci->capidtmf_state));
-
-	}
-	plci->NData[0].P = plci->internal_req_buffer;
-	plci->NL.X = plci->NData;
-	plci->NL.ReqCh = 0;
-	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-	plci->adapter->request(&plci->NL);
-}
-
-
-static void dtmf_send_digits(PLCI *plci, byte *digit_buffer, word digit_count)
-{
-	word w, i;
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_digits %d",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, digit_count));
-
-	plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_SEND_DIGITS;
-	w = (plci->dtmf_send_pulse_ms == 0) ? 40 : plci->dtmf_send_pulse_ms;
-	PUT_WORD(&plci->internal_req_buffer[1], w);
-	w = (plci->dtmf_send_pause_ms == 0) ? 40 : plci->dtmf_send_pause_ms;
-	PUT_WORD(&plci->internal_req_buffer[3], w);
-	for (i = 0; i < digit_count; i++)
-	{
-		w = 0;
-		while ((w < DTMF_DIGIT_MAP_ENTRIES)
-		       && (digit_buffer[i] != dtmf_digit_map[w].character))
-		{
-			w++;
-		}
-		plci->internal_req_buffer[5 + i] = (w < DTMF_DIGIT_MAP_ENTRIES) ?
-			dtmf_digit_map[w].code : DTMF_DIGIT_TONE_CODE_STAR;
-	}
-	plci->NData[0].PLength = 5 + digit_count;
-	plci->NData[0].P = plci->internal_req_buffer;
-	plci->NL.X = plci->NData;
-	plci->NL.ReqCh = 0;
-	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-	plci->adapter->request(&plci->NL);
-}
-
-
-static void dtmf_rec_clear_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_rec_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->dtmf_rec_active = 0;
-	plci->dtmf_rec_pulse_ms = 0;
-	plci->dtmf_rec_pause_ms = 0;
-
-	capidtmf_init(&(plci->capidtmf_state), plci->adapter->u_law);
-
-}
-
-
-static void dtmf_send_clear_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->dtmf_send_requests = 0;
-	plci->dtmf_send_pulse_ms = 0;
-	plci->dtmf_send_pause_ms = 0;
-}
-
-
-static void dtmf_prepare_switch(dword Id, PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_prepare_switch",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	while (plci->dtmf_send_requests != 0)
-		dtmf_confirmation(Id, plci);
-}
-
-
-static word dtmf_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_save_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	return (GOOD);
-}
-
-
-static word dtmf_restore_config(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_restore_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	if (plci->B1_facilities & B1_FACILITY_DTMFR)
-	{
-		switch (plci->adjust_b_state)
-		{
-		case ADJUST_B_RESTORE_DTMF_1:
-			plci->internal_command = plci->adjust_b_command;
-			if (plci_nl_busy(plci))
-			{
-				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
-				break;
-			}
-			dtmf_enable_receiver(plci, plci->dtmf_rec_active);
-			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_2;
-			break;
-		case ADJUST_B_RESTORE_DTMF_2:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Reenable DTMF receiver failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _WRONG_STATE;
-				break;
-			}
-			break;
-		}
-	}
-	return (Info);
-}
-
-
-static void dtmf_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command, Info;
-	byte mask;
-	byte result[4];
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_command %02x %04x %04x %d %d %d %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
-			plci->dtmf_cmd, plci->dtmf_rec_pulse_ms, plci->dtmf_rec_pause_ms,
-			plci->dtmf_send_pulse_ms, plci->dtmf_send_pause_ms));
-
-	Info = GOOD;
-	result[0] = 2;
-	PUT_WORD(&result[1], DTMF_SUCCESS);
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	mask = 0x01;
-	switch (plci->dtmf_cmd)
-	{
-
-	case DTMF_LISTEN_TONE_START:
-		mask <<= 1; /* fall through */
-	case DTMF_LISTEN_MF_START:
-		mask <<= 1; /* fall through */
-
-	case DTMF_LISTEN_START:
-		switch (internal_command)
-		{
-		default:
-			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
-								  B1_FACILITY_DTMFR), DTMF_COMMAND_1);
-			/* fall through */
-		case DTMF_COMMAND_1:
-			if (adjust_b_process(Id, plci, Rc) != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				return;
-			/* fall through */
-		case DTMF_COMMAND_2:
-			if (plci_nl_busy(plci))
-			{
-				plci->internal_command = DTMF_COMMAND_2;
-				return;
-			}
-			plci->internal_command = DTMF_COMMAND_3;
-			dtmf_enable_receiver(plci, (byte)(plci->dtmf_rec_active | mask));
-			return;
-		case DTMF_COMMAND_3:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Enable DTMF receiver failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-
-			plci->tone_last_indication_code = DTMF_SIGNAL_NO_TONE;
-
-			plci->dtmf_rec_active |= mask;
-			break;
-		}
-		break;
-
-
-	case DTMF_LISTEN_TONE_STOP:
-		mask <<= 1; /* fall through */
-	case DTMF_LISTEN_MF_STOP:
-		mask <<= 1; /* fall through */
-
-	case DTMF_LISTEN_STOP:
-		switch (internal_command)
-		{
-		default:
-			plci->dtmf_rec_active &= ~mask;
-			if (plci->dtmf_rec_active)
-				break;
-/*
-  case DTMF_COMMAND_1:
-  if (plci->dtmf_rec_active)
-  {
-  if (plci_nl_busy (plci))
-  {
-  plci->internal_command = DTMF_COMMAND_1;
-  return;
-  }
-  plci->dtmf_rec_active &= ~mask;
-  plci->internal_command = DTMF_COMMAND_2;
-  dtmf_enable_receiver (plci, false);
-  return;
-  }
-  Rc = OK;
-  case DTMF_COMMAND_2:
-  if ((Rc != OK) && (Rc != OK_FC))
-  {
-  dbug (1, dprintf("[%06lx] %s,%d: Disable DTMF receiver failed %02x",
-  UnMapId (Id), (char far *)(FILE_), __LINE__, Rc));
-  Info = _FACILITY_NOT_SUPPORTED;
-  break;
-  }
-*/
-			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
-								  ~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3);
-			/* fall through */
-		case DTMF_COMMAND_3:
-			if (adjust_b_process(Id, plci, Rc) != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Unload DTMF failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				return;
-			break;
-		}
-		break;
-
-
-	case DTMF_SEND_TONE:
-		mask <<= 1; /* fall through */
-	case DTMF_SEND_MF:
-		mask <<= 1; /* fall through */
-
-	case DTMF_DIGITS_SEND:
-		switch (internal_command)
-		{
-		default:
-			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
-								  ((plci->dtmf_parameter_length != 0) ? B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)),
-					   DTMF_COMMAND_1);
-			/* fall through */
-		case DTMF_COMMAND_1:
-			if (adjust_b_process(Id, plci, Rc) != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				return;
-			/* fall through */
-		case DTMF_COMMAND_2:
-			if (plci_nl_busy(plci))
-			{
-				plci->internal_command = DTMF_COMMAND_2;
-				return;
-			}
-			plci->dtmf_msg_number_queue[(plci->dtmf_send_requests)++] = plci->number;
-			plci->internal_command = DTMF_COMMAND_3;
-			dtmf_send_digits(plci, &plci->saved_msg.parms[3].info[1], plci->saved_msg.parms[3].length);
-			return;
-		case DTMF_COMMAND_3:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Send DTMF digits failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				if (plci->dtmf_send_requests != 0)
-					(plci->dtmf_send_requests)--;
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			return;
-		}
-		break;
-	}
-	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
-	      "wws", Info, SELECTOR_DTMF, result);
-}
-
-
-static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL   *appl, API_PARSE *msg)
-{
-	word Info;
-	word i, j;
-	byte mask;
-	API_PARSE dtmf_parms[5];
-	byte result[40];
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_request",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	Info = GOOD;
-	result[0] = 2;
-	PUT_WORD(&result[1], DTMF_SUCCESS);
-	if (!(a->profile.Global_Options & GL_DTMF_SUPPORTED))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _FACILITY_NOT_SUPPORTED;
-	}
-	else if (api_parse(&msg[1].info[1], msg[1].length, "w", dtmf_parms))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _WRONG_MESSAGE_FORMAT;
-	}
-
-	else if ((GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
-		 || (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_SEND_CODES))
-	{
-		if (!((a->requested_options_table[appl->Id - 1])
-		      & (1L << PRIVATE_DTMF_TONE)))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
-			PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
-		}
-		else
-		{
-			for (i = 0; i < 32; i++)
-				result[4 + i] = 0;
-			if (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
-			{
-				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
-				{
-					if (dtmf_digit_map[i].listen_mask != 0)
-						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
-				}
-			}
-			else
-			{
-				for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
-				{
-					if (dtmf_digit_map[i].send_mask != 0)
-						result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
-				}
-			}
-			result[0] = 3 + 32;
-			result[3] = 32;
-		}
-	}
-
-	else if (plci == NULL)
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _WRONG_IDENTIFIER;
-	}
-	else
-	{
-		if (!plci->State
-		    || !plci->NL.Id || plci->nl_remove_id)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			Info = _WRONG_STATE;
-		}
-		else
-		{
-			plci->command = 0;
-			plci->dtmf_cmd = GET_WORD(dtmf_parms[0].info);
-			mask = 0x01;
-			switch (plci->dtmf_cmd)
-			{
-
-			case DTMF_LISTEN_TONE_START:
-			case DTMF_LISTEN_TONE_STOP:
-				mask <<= 1; /* fall through */
-			case DTMF_LISTEN_MF_START:
-			case DTMF_LISTEN_MF_STOP:
-				mask <<= 1;
-				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
-				      & (1L << PRIVATE_DTMF_TONE)))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
-							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
-					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
-					break;
-				}
-				/* fall through */
-
-			case DTMF_LISTEN_START:
-			case DTMF_LISTEN_STOP:
-				if (!(a->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
-				    && !(a->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _FACILITY_NOT_SUPPORTED;
-					break;
-				}
-				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
-				{
-					if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
-					{
-						plci->dtmf_rec_pulse_ms = 0;
-						plci->dtmf_rec_pause_ms = 0;
-					}
-					else
-					{
-						plci->dtmf_rec_pulse_ms = GET_WORD(dtmf_parms[1].info);
-						plci->dtmf_rec_pause_ms = GET_WORD(dtmf_parms[2].info);
-					}
-				}
-				start_internal_command(Id, plci, dtmf_command);
-				return (false);
-
-
-			case DTMF_SEND_TONE:
-				mask <<= 1; /* fall through */
-			case DTMF_SEND_MF:
-				mask <<= 1;
-				if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
-				      & (1L << PRIVATE_DTMF_TONE)))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
-							UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
-					PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
-					break;
-				}
-				/* fall through */
-
-			case DTMF_DIGITS_SEND:
-				if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				if (mask & DTMF_LISTEN_ACTIVE_FLAG)
-				{
-					plci->dtmf_send_pulse_ms = GET_WORD(dtmf_parms[1].info);
-					plci->dtmf_send_pause_ms = GET_WORD(dtmf_parms[2].info);
-				}
-				i = 0;
-				j = 0;
-				while ((i < dtmf_parms[3].length) && (j < DTMF_DIGIT_MAP_ENTRIES))
-				{
-					j = 0;
-					while ((j < DTMF_DIGIT_MAP_ENTRIES)
-					       && ((dtmf_parms[3].info[i + 1] != dtmf_digit_map[j].character)
-						   || ((dtmf_digit_map[j].send_mask & mask) == 0)))
-					{
-						j++;
-					}
-					i++;
-				}
-				if (j == DTMF_DIGIT_MAP_ENTRIES)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Incorrect DTMF digit %02x",
-							UnMapId(Id), (char *)(FILE_), __LINE__, dtmf_parms[3].info[i]));
-					PUT_WORD(&result[1], DTMF_INCORRECT_DIGIT);
-					break;
-				}
-				if (plci->dtmf_send_requests >= ARRAY_SIZE(plci->dtmf_msg_number_queue))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: DTMF request overrun",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_STATE;
-					break;
-				}
-				api_save_msg(dtmf_parms, "wwws", &plci->saved_msg);
-				start_internal_command(Id, plci, dtmf_command);
-				return (false);
-
-			default:
-				dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, plci->dtmf_cmd));
-				PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
-			}
-		}
-	}
-	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
-	      "wws", Info, SELECTOR_DTMF, result);
-	return (false);
-}
-
-
-static void dtmf_confirmation(dword Id, PLCI *plci)
-{
-	word i;
-	byte result[4];
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_confirmation",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	result[0] = 2;
-	PUT_WORD(&result[1], DTMF_SUCCESS);
-	if (plci->dtmf_send_requests != 0)
-	{
-		sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->dtmf_msg_number_queue[0],
-		      "wws", GOOD, SELECTOR_DTMF, result);
-		(plci->dtmf_send_requests)--;
-		for (i = 0; i < plci->dtmf_send_requests; i++)
-			plci->dtmf_msg_number_queue[i] = plci->dtmf_msg_number_queue[i + 1];
-	}
-}
-
-
-static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length)
-{
-	word i, j, n;
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_indication",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	n = 0;
-	for (i = 1; i < length; i++)
-	{
-		j = 0;
-		while ((j < DTMF_DIGIT_MAP_ENTRIES)
-		       && ((msg[i] != dtmf_digit_map[j].code)
-			   || ((dtmf_digit_map[j].listen_mask & plci->dtmf_rec_active) == 0)))
-		{
-			j++;
-		}
-		if (j < DTMF_DIGIT_MAP_ENTRIES)
-		{
-
-			if ((dtmf_digit_map[j].listen_mask & DTMF_TONE_LISTEN_ACTIVE_FLAG)
-			    && (plci->tone_last_indication_code == DTMF_SIGNAL_NO_TONE)
-			    && (dtmf_digit_map[j].character != DTMF_SIGNAL_UNIDENTIFIED_TONE))
-			{
-				if (n + 1 == i)
-				{
-					for (i = length; i > n + 1; i--)
-						msg[i] = msg[i - 1];
-					length++;
-					i++;
-				}
-				msg[++n] = DTMF_SIGNAL_UNIDENTIFIED_TONE;
-			}
-			plci->tone_last_indication_code = dtmf_digit_map[j].character;
-
-			msg[++n] = dtmf_digit_map[j].character;
-		}
-	}
-	if (n != 0)
-	{
-		msg[0] = (byte) n;
-		sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "wS", SELECTOR_DTMF, msg);
-	}
-}
-
-
-/*------------------------------------------------------------------*/
-/* DTMF parameters                                                  */
-/*------------------------------------------------------------------*/
-
-static void dtmf_parameter_write(PLCI *plci)
-{
-	word i;
-	byte parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE + 2];
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_write",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	parameter_buffer[0] = plci->dtmf_parameter_length + 1;
-	parameter_buffer[1] = DSP_CTRL_SET_DTMF_PARAMETERS;
-	for (i = 0; i < plci->dtmf_parameter_length; i++)
-		parameter_buffer[2 + i] = plci->dtmf_parameter_buffer[i];
-	add_p(plci, FTY, parameter_buffer);
-	sig_req(plci, TEL_CTRL, 0);
-	send_req(plci);
-}
-
-
-static void dtmf_parameter_clear_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->dtmf_parameter_length = 0;
-}
-
-
-static void dtmf_parameter_prepare_switch(dword Id, PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_prepare_switch",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word dtmf_parameter_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_save_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	return (GOOD);
-}
-
-
-static word dtmf_parameter_restore_config(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-
-	dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_restore_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	if ((plci->B1_facilities & B1_FACILITY_DTMFR)
-	    && (plci->dtmf_parameter_length != 0))
-	{
-		switch (plci->adjust_b_state)
-		{
-		case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
-			plci->internal_command = plci->adjust_b_command;
-			if (plci->sig_req)
-			{
-				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
-				break;
-			}
-			dtmf_parameter_write(plci);
-			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_2;
-			break;
-		case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Restore DTMF parameters failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _WRONG_STATE;
-				break;
-			}
-			break;
-		}
-	}
-	return (Info);
-}
-
-
-/*------------------------------------------------------------------*/
-/* Line interconnect facilities                                     */
-/*------------------------------------------------------------------*/
-
-
-LI_CONFIG   *li_config_table;
-word li_total_channels;
-
-
-/*------------------------------------------------------------------*/
-/* translate a CHI information element to a channel number          */
-/* returns 0xff - any channel                                       */
-/*         0xfe - chi wrong coding                                  */
-/*         0xfd - D-channel                                         */
-/*         0x00 - no channel                                        */
-/*         else channel number / PRI: timeslot                      */
-/* if pchannelmap is provided we accept more than one channel.      */
-/*------------------------------------------------------------------*/
-
-static byte chi_to_channel(byte *chi, dword *pchannelmap)
-{
-	int p;
-	int i;
-	dword map;
-	byte excl;
-	byte ofs;
-	byte ch;
-
-	if (pchannelmap) *pchannelmap = 0;
-	if (!chi[0]) return 0xff;
-	excl = 0;
-
-	if (chi[1] & 0x20) {
-		if (chi[0] == 1 && chi[1] == 0xac) return 0xfd; /* exclusive d-channel */
-		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
-		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
-		if ((chi[1] | 0xc8) != 0xe9) return 0xfe;
-		if (chi[1] & 0x08) excl = 0x40;
-
-		/* int. id present */
-		if (chi[1] & 0x40) {
-			p = i + 1;
-			for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
-			if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
-		}
-
-		/* coding standard, Number/Map, Channel Type */
-		p = i + 1;
-		for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
-		if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
-		if ((chi[p] | 0xd0) != 0xd3) return 0xfe;
-
-		/* Number/Map */
-		if (chi[p] & 0x10) {
-
-			/* map */
-			if ((chi[0] - p) == 4) ofs = 0;
-			else if ((chi[0] - p) == 3) ofs = 1;
-			else return 0xfe;
-			ch = 0;
-			map = 0;
-			for (i = 0; i < 4 && p < chi[0]; i++) {
-				p++;
-				ch += 8;
-				map <<= 8;
-				if (chi[p]) {
-					for (ch = 0; !(chi[p] & (1 << ch)); ch++);
-					map |= chi[p];
-				}
-			}
-			ch += ofs;
-			map <<= ofs;
-		}
-		else {
-
-			/* number */
-			p = i + 1;
-			ch = chi[p] & 0x3f;
-			if (pchannelmap) {
-				if ((byte)(chi[0] - p) > 30) return 0xfe;
-				map = 0;
-				for (i = p; i <= chi[0]; i++) {
-					if ((chi[i] & 0x7f) > 31) return 0xfe;
-					map |= (1L << (chi[i] & 0x7f));
-				}
-			}
-			else {
-				if (p != chi[0]) return 0xfe;
-				if (ch > 31) return 0xfe;
-				map = (1L << ch);
-			}
-			if (chi[p] & 0x40) return 0xfe;
-		}
-		if (pchannelmap) *pchannelmap = map;
-		else if (map != ((dword)(1L << ch))) return 0xfe;
-		return (byte)(excl | ch);
-	}
-	else {  /* not PRI */
-		for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
-		if (i != chi[0] || !(chi[i] & 0x80)) return 0xfe;
-		if (chi[1] & 0x08) excl = 0x40;
-
-		switch (chi[1] | 0x98) {
-		case 0x98: return 0;
-		case 0x99:
-			if (pchannelmap) *pchannelmap = 2;
-			return excl | 1;
-		case 0x9a:
-			if (pchannelmap) *pchannelmap = 4;
-			return excl | 2;
-		case 0x9b: return 0xff;
-		case 0x9c: return 0xfd; /* d-ch */
-		default: return 0xfe;
-		}
-	}
-}
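/*
 * Editorial note -- illustrative worked example, not part of the removed
 * driver source: the octet values below are chosen for illustration.  A
 * one-octet basic-rate CHI element 0x81 ("B1, preferred") decodes to
 * channel 1 with only bit 1 set in the channel map, while a one-octet
 * 0xac element takes the 0xfd (D-channel) path above.
 */
static void example_chi_decode(void)
{
	byte chi[2] = { 1, 0x81 };	/* length octet followed by the contents */
	dword map;
	byte ch;

	ch = chi_to_channel(chi, &map);
	/* here ch == 0x01 and map == 0x00000002 */
	(void) ch;
	(void) map;
}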
-
-
-static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id)
-{
-	DIVA_CAPI_ADAPTER *a;
-	PLCI *splci;
-	byte old_id;
-
-	a = plci->adapter;
-	old_id = plci->li_bchannel_id;
-	if (a->li_pri)
-	{
-		if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
-			li_config_table[a->li_base + (old_id - 1)].plci = NULL;
-		plci->li_bchannel_id = (bchannel_id & 0x1f) + 1;
-		if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
-			li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
-	}
-	else
-	{
-		if (((bchannel_id & 0x03) == 1) || ((bchannel_id & 0x03) == 2))
-		{
-			if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
-				li_config_table[a->li_base + (old_id - 1)].plci = NULL;
-			plci->li_bchannel_id = bchannel_id & 0x03;
-			if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
-			{
-				splci = a->AdvSignalPLCI;
-				if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
-				{
-					if ((splci->li_bchannel_id != 0)
-					    && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
-					{
-						li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
-					}
-					splci->li_bchannel_id = 3 - plci->li_bchannel_id;
-					li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
-					dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id_esc %d",
-							(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
-							(char *)(FILE_), __LINE__, splci->li_bchannel_id));
-				}
-			}
-			if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
-				li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
-		}
-	}
-	if ((old_id == 0) && (plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		mixer_clear_config(plci);
-	}
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id_esc %d %d",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, bchannel_id, plci->li_bchannel_id));
-}
-
-
-static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
-{
-	DIVA_CAPI_ADAPTER *a;
-	PLCI *splci;
-	byte ch, old_id;
-
-	a = plci->adapter;
-	old_id = plci->li_bchannel_id;
-	ch = chi_to_channel(chi, NULL);
-	if (!(ch & 0x80))
-	{
-		if (a->li_pri)
-		{
-			if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
-				li_config_table[a->li_base + (old_id - 1)].plci = NULL;
-			plci->li_bchannel_id = (ch & 0x1f) + 1;
-			if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
-				li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
-		}
-		else
-		{
-			if (((ch & 0x1f) == 1) || ((ch & 0x1f) == 2))
-			{
-				if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
-					li_config_table[a->li_base + (old_id - 1)].plci = NULL;
-				plci->li_bchannel_id = ch & 0x1f;
-				if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
-				{
-					splci = a->AdvSignalPLCI;
-					if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
-					{
-						if ((splci->li_bchannel_id != 0)
-						    && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
-						{
-							li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
-						}
-						splci->li_bchannel_id = 3 - plci->li_bchannel_id;
-						li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
-						dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
-								(dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
-								(char *)(FILE_), __LINE__, splci->li_bchannel_id));
-					}
-				}
-				if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
-					li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
-			}
-		}
-	}
-	if ((old_id == 0) && (plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		mixer_clear_config(plci);
-	}
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id %02x %d",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, ch, plci->li_bchannel_id));
-}
-
-
-#define MIXER_MAX_DUMP_CHANNELS 34
-
-static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
-{
-	word n, i, j;
-	char *p;
-	char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_calculate_coefs",
-			(dword)(UnMapController(a->Id)), (char *)(FILE_), __LINE__));
-
-	for (i = 0; i < li_total_channels; i++)
-	{
-		li_config_table[i].channel &= LI_CHANNEL_ADDRESSES_SET;
-		if (li_config_table[i].chflags != 0)
-			li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
-		else
-		{
-			for (j = 0; j < li_total_channels; j++)
-			{
-				if (((li_config_table[i].flag_table[j]) != 0)
-				    || ((li_config_table[j].flag_table[i]) != 0))
-				{
-					li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
-				}
-				if (((li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) != 0)
-				    || ((li_config_table[j].flag_table[i] & LI_FLAG_CONFERENCE) != 0))
-				{
-					li_config_table[i].channel |= LI_CHANNEL_CONFERENCE;
-				}
-			}
-		}
-	}
-	for (i = 0; i < li_total_channels; i++)
-	{
-		for (j = 0; j < li_total_channels; j++)
-		{
-			li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC);
-			if (li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE)
-				li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
-		}
-	}
-	for (n = 0; n < li_total_channels; n++)
-	{
-		if (li_config_table[n].channel & LI_CHANNEL_CONFERENCE)
-		{
-			for (i = 0; i < li_total_channels; i++)
-			{
-				if (li_config_table[i].channel & LI_CHANNEL_CONFERENCE)
-				{
-					for (j = 0; j < li_total_channels; j++)
-					{
-						li_config_table[i].coef_table[j] |=
-							li_config_table[i].coef_table[n] & li_config_table[n].coef_table[j];
-					}
-				}
-			}
-		}
-	}
-	for (i = 0; i < li_total_channels; i++)
-	{
-		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
-		{
-			li_config_table[i].coef_table[i] &= ~LI_COEF_CH_CH;
-			for (j = 0; j < li_total_channels; j++)
-			{
-				if (li_config_table[i].coef_table[j] & LI_COEF_CH_CH)
-					li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE;
-			}
-			if (li_config_table[i].flag_table[i] & LI_FLAG_CONFERENCE)
-				li_config_table[i].coef_table[i] |= LI_COEF_CH_CH;
-		}
-	}
-	for (i = 0; i < li_total_channels; i++)
-	{
-		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
-		{
-			for (j = 0; j < li_total_channels; j++)
-			{
-				if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
-					li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
-				if (li_config_table[i].flag_table[j] & LI_FLAG_MONITOR)
-					li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
-				if (li_config_table[i].flag_table[j] & LI_FLAG_MIX)
-					li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
-				if (li_config_table[i].flag_table[j] & LI_FLAG_PCCONNECT)
-					li_config_table[i].coef_table[j] |= LI_COEF_PC_PC;
-			}
-			if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
-			{
-				for (j = 0; j < li_total_channels; j++)
-				{
-					if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
-					{
-						li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
-						if (li_config_table[j].chflags & LI_CHFLAG_MIX)
-							li_config_table[i].coef_table[j] |= LI_COEF_PC_CH | LI_COEF_PC_PC;
-					}
-				}
-			}
-			if (li_config_table[i].chflags & LI_CHFLAG_MIX)
-			{
-				for (j = 0; j < li_total_channels; j++)
-				{
-					if (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT)
-						li_config_table[j].coef_table[i] |= LI_COEF_PC_CH;
-				}
-			}
-			if (li_config_table[i].chflags & LI_CHFLAG_LOOP)
-			{
-				for (j = 0; j < li_total_channels; j++)
-				{
-					if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
-					{
-						for (n = 0; n < li_total_channels; n++)
-						{
-							if (li_config_table[n].flag_table[i] & LI_FLAG_INTERCONNECT)
-							{
-								li_config_table[n].coef_table[j] |= LI_COEF_CH_CH;
-								if (li_config_table[j].chflags & LI_CHFLAG_MIX)
-								{
-									li_config_table[n].coef_table[j] |= LI_COEF_PC_CH;
-									if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
-										li_config_table[n].coef_table[j] |= LI_COEF_CH_PC | LI_COEF_PC_PC;
-								}
-								else if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
-									li_config_table[n].coef_table[j] |= LI_COEF_CH_PC;
-							}
-						}
-					}
-				}
-			}
-		}
-	}
-	for (i = 0; i < li_total_channels; i++)
-	{
-		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
-		{
-			if (li_config_table[i].chflags & (LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP))
-				li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
-			if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
-				li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
-			if (li_config_table[i].chflags & LI_CHFLAG_MIX)
-				li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
-			for (j = 0; j < li_total_channels; j++)
-			{
-				if ((li_config_table[i].flag_table[j] &
-				     (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_MONITOR))
-				    || (li_config_table[j].flag_table[i] &
-					(LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX)))
-				{
-					li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
-				}
-				if (li_config_table[i].flag_table[j] & (LI_FLAG_PCCONNECT | LI_FLAG_MONITOR))
-					li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
-				if (li_config_table[j].flag_table[i] & (LI_FLAG_PCCONNECT | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX))
-					li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
-			}
-			if (!(li_config_table[i].channel & LI_CHANNEL_ACTIVE))
-			{
-				li_config_table[i].coef_table[i] |= LI_COEF_PC_CH | LI_COEF_CH_PC;
-				li_config_table[i].channel |= LI_CHANNEL_TX_DATA | LI_CHANNEL_RX_DATA;
-			}
-		}
-	}
-	for (i = 0; i < li_total_channels; i++)
-	{
-		if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
-		{
-			j = 0;
-			while ((j < li_total_channels) && !(li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT))
-				j++;
-			if (j < li_total_channels)
-			{
-				for (j = 0; j < li_total_channels; j++)
-				{
-					li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_PC_CH);
-					if (li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT)
-						li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
-				}
-			}
-		}
-	}
-	n = li_total_channels;
-	if (n > MIXER_MAX_DUMP_CHANNELS)
-		n = MIXER_MAX_DUMP_CHANNELS;
-
-	p = hex_line;
-	for (j = 0; j < n; j++)
-	{
-		if ((j & 0x7) == 0)
-			*(p++) = ' ';
-		p = hex_byte_pack(p, li_config_table[j].curchnl);
-	}
-	*p = '\0';
-	dbug(1, dprintf("[%06lx] CURRENT %s",
-			(dword)(UnMapController(a->Id)), (char *)hex_line));
-	p = hex_line;
-	for (j = 0; j < n; j++)
-	{
-		if ((j & 0x7) == 0)
-			*(p++) = ' ';
-		p = hex_byte_pack(p, li_config_table[j].channel);
-	}
-	*p = '\0';
-	dbug(1, dprintf("[%06lx] CHANNEL %s",
-			(dword)(UnMapController(a->Id)), (char *)hex_line));
-	p = hex_line;
-	for (j = 0; j < n; j++)
-	{
-		if ((j & 0x7) == 0)
-			*(p++) = ' ';
-		p = hex_byte_pack(p, li_config_table[j].chflags);
-	}
-	*p = '\0';
-	dbug(1, dprintf("[%06lx] CHFLAG  %s",
-			(dword)(UnMapController(a->Id)), (char *)hex_line));
-	for (i = 0; i < n; i++)
-	{
-		p = hex_line;
-		for (j = 0; j < n; j++)
-		{
-			if ((j & 0x7) == 0)
-				*(p++) = ' ';
-			p = hex_byte_pack(p, li_config_table[i].flag_table[j]);
-		}
-		*p = '\0';
-		dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
-				(dword)(UnMapController(a->Id)), i, (char *)hex_line));
-	}
-	for (i = 0; i < n; i++)
-	{
-		p = hex_line;
-		for (j = 0; j < n; j++)
-		{
-			if ((j & 0x7) == 0)
-				*(p++) = ' ';
-			p = hex_byte_pack(p, li_config_table[i].coef_table[j]);
-		}
-		*p = '\0';
-		dbug(1, dprintf("[%06lx] COEF[%02x]%s",
-				(dword)(UnMapController(a->Id)), i, (char *)hex_line));
-	}
-}
-
-
-static struct
-{
-	byte mask;
-	byte line_flags;
-} mixer_write_prog_pri[] =
-{
-	{ LI_COEF_CH_CH, 0 },
-	{ LI_COEF_CH_PC, MIXER_COEF_LINE_TO_PC_FLAG },
-	{ LI_COEF_PC_CH, MIXER_COEF_LINE_FROM_PC_FLAG },
-	{ LI_COEF_PC_PC, MIXER_COEF_LINE_TO_PC_FLAG | MIXER_COEF_LINE_FROM_PC_FLAG }
-};
-
-static struct
-{
-	byte from_ch;
-	byte to_ch;
-	byte mask;
-	byte xconnect_override;
-} mixer_write_prog_bri[] =
-{
-	{ 0, 0, LI_COEF_CH_CH, 0x01 },  /* B      to B      */
-	{ 1, 0, LI_COEF_CH_CH, 0x01 },  /* Alt B  to B      */
-	{ 0, 0, LI_COEF_PC_CH, 0x80 },  /* PC     to B      */
-	{ 1, 0, LI_COEF_PC_CH, 0x01 },  /* Alt PC to B      */
-	{ 2, 0, LI_COEF_CH_CH, 0x00 },  /* IC     to B      */
-	{ 3, 0, LI_COEF_CH_CH, 0x00 },  /* Alt IC to B      */
-	{ 0, 0, LI_COEF_CH_PC, 0x80 },  /* B      to PC     */
-	{ 1, 0, LI_COEF_CH_PC, 0x01 },  /* Alt B  to PC     */
-	{ 0, 0, LI_COEF_PC_PC, 0x01 },  /* PC     to PC     */
-	{ 1, 0, LI_COEF_PC_PC, 0x01 },  /* Alt PC to PC     */
-	{ 2, 0, LI_COEF_CH_PC, 0x00 },  /* IC     to PC     */
-	{ 3, 0, LI_COEF_CH_PC, 0x00 },  /* Alt IC to PC     */
-	{ 0, 2, LI_COEF_CH_CH, 0x00 },  /* B      to IC     */
-	{ 1, 2, LI_COEF_CH_CH, 0x00 },  /* Alt B  to IC     */
-	{ 0, 2, LI_COEF_PC_CH, 0x00 },  /* PC     to IC     */
-	{ 1, 2, LI_COEF_PC_CH, 0x00 },  /* Alt PC to IC     */
-	{ 2, 2, LI_COEF_CH_CH, 0x00 },  /* IC     to IC     */
-	{ 3, 2, LI_COEF_CH_CH, 0x00 },  /* Alt IC to IC     */
-	{ 1, 1, LI_COEF_CH_CH, 0x01 },  /* Alt B  to Alt B  */
-	{ 0, 1, LI_COEF_CH_CH, 0x01 },  /* B      to Alt B  */
-	{ 1, 1, LI_COEF_PC_CH, 0x80 },  /* Alt PC to Alt B  */
-	{ 0, 1, LI_COEF_PC_CH, 0x01 },  /* PC     to Alt B  */
-	{ 3, 1, LI_COEF_CH_CH, 0x00 },  /* Alt IC to Alt B  */
-	{ 2, 1, LI_COEF_CH_CH, 0x00 },  /* IC     to Alt B  */
-	{ 1, 1, LI_COEF_CH_PC, 0x80 },  /* Alt B  to Alt PC */
-	{ 0, 1, LI_COEF_CH_PC, 0x01 },  /* B      to Alt PC */
-	{ 1, 1, LI_COEF_PC_PC, 0x01 },  /* Alt PC to Alt PC */
-	{ 0, 1, LI_COEF_PC_PC, 0x01 },  /* PC     to Alt PC */
-	{ 3, 1, LI_COEF_CH_PC, 0x00 },  /* Alt IC to Alt PC */
-	{ 2, 1, LI_COEF_CH_PC, 0x00 },  /* IC     to Alt PC */
-	{ 1, 3, LI_COEF_CH_CH, 0x00 },  /* Alt B  to Alt IC */
-	{ 0, 3, LI_COEF_CH_CH, 0x00 },  /* B      to Alt IC */
-	{ 1, 3, LI_COEF_PC_CH, 0x00 },  /* Alt PC to Alt IC */
-	{ 0, 3, LI_COEF_PC_CH, 0x00 },  /* PC     to Alt IC */
-	{ 3, 3, LI_COEF_CH_CH, 0x00 },  /* Alt IC to Alt IC */
-	{ 2, 3, LI_COEF_CH_CH, 0x00 }   /* IC     to Alt IC */
-};
-
-static byte mixer_swapped_index_bri[] =
-{
-	18,  /* B      to B      */
-	19,  /* Alt B  to B      */
-	20,  /* PC     to B      */
-	21,  /* Alt PC to B      */
-	22,  /* IC     to B      */
-	23,  /* Alt IC to B      */
-	24,  /* B      to PC     */
-	25,  /* Alt B  to PC     */
-	26,  /* PC     to PC     */
-	27,  /* Alt PC to PC     */
-	28,  /* IC     to PC     */
-	29,  /* Alt IC to PC     */
-	30,  /* B      to IC     */
-	31,  /* Alt B  to IC     */
-	32,  /* PC     to IC     */
-	33,  /* Alt PC to IC     */
-	34,  /* IC     to IC     */
-	35,  /* Alt IC to IC     */
-	0,   /* Alt B  to Alt B  */
-	1,   /* B      to Alt B  */
-	2,   /* Alt PC to Alt B  */
-	3,   /* PC     to Alt B  */
-	4,   /* Alt IC to Alt B  */
-	5,   /* IC     to Alt B  */
-	6,   /* Alt B  to Alt PC */
-	7,   /* B      to Alt PC */
-	8,   /* Alt PC to Alt PC */
-	9,   /* PC     to Alt PC */
-	10,  /* Alt IC to Alt PC */
-	11,  /* IC     to Alt PC */
-	12,  /* Alt B  to Alt IC */
-	13,  /* B      to Alt IC */
-	14,  /* Alt PC to Alt IC */
-	15,  /* PC     to Alt IC */
-	16,  /* Alt IC to Alt IC */
-	17   /* IC     to Alt IC */
-};
-
-static struct
-{
-	byte mask;
-	byte from_pc;
-	byte to_pc;
-} xconnect_write_prog[] =
-{
-	{ LI_COEF_CH_CH, false, false },
-	{ LI_COEF_CH_PC, false, true },
-	{ LI_COEF_PC_CH, true, false },
-	{ LI_COEF_PC_PC, true, true }
-};
-
-
-static void xconnect_query_addresses(PLCI *plci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word w, ch;
-	byte *p;
-
-	dbug(1, dprintf("[%06lx] %s,%d: xconnect_query_addresses",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	a = plci->adapter;
-	if (a->li_pri && ((plci->li_bchannel_id == 0)
-			  || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci)))
-	{
-		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
-				(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-				(char *)(FILE_), __LINE__));
-		return;
-	}
-	p = plci->internal_req_buffer;
-	ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
-	*(p++) = UDATA_REQUEST_XCONNECT_FROM;
-	w = ch;
-	*(p++) = (byte) w;
-	*(p++) = (byte)(w >> 8);
-	w = ch | XCONNECT_CHANNEL_PORT_PC;
-	*(p++) = (byte) w;
-	*(p++) = (byte)(w >> 8);
-	plci->NData[0].P = plci->internal_req_buffer;
-	plci->NData[0].PLength = p - plci->internal_req_buffer;
-	plci->NL.X = plci->NData;
-	plci->NL.ReqCh = 0;
-	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-	plci->adapter->request(&plci->NL);
-}
-
-
-static void xconnect_write_coefs(PLCI *plci, word internal_command)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: xconnect_write_coefs %04x",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, internal_command));
-
-	plci->li_write_command = internal_command;
-	plci->li_write_channel = 0;
-}
-
-
-static byte xconnect_write_coefs_process(dword Id, PLCI *plci, byte Rc)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word w, n, i, j, r, s, to_ch;
-	dword d;
-	byte *p;
-	struct xconnect_transfer_address_s   *transfer_address;
-	byte ch_map[MIXER_CHANNELS_BRI];
-
-	dbug(1, dprintf("[%06x] %s,%d: xconnect_write_coefs_process %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->li_write_channel));
-
-	a = plci->adapter;
-	if ((plci->li_bchannel_id == 0)
-	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
-	{
-		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		return (true);
-	}
-	i = a->li_base + (plci->li_bchannel_id - 1);
-	j = plci->li_write_channel;
-	p = plci->internal_req_buffer;
-	if (j != 0)
-	{
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: LI write coefs failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			return (false);
-		}
-	}
-	if (li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-	{
-		r = 0;
-		s = 0;
-		if (j < li_total_channels)
-		{
-			if (li_config_table[i].channel & LI_CHANNEL_ADDRESSES_SET)
-			{
-				s = ((li_config_table[i].send_b.card_address.low | li_config_table[i].send_b.card_address.high) ?
-				     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_PC | LI_COEF_PC_PC)) &
-					((li_config_table[i].send_pc.card_address.low | li_config_table[i].send_pc.card_address.high) ?
-					 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_PC_CH));
-			}
-			r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-			while ((j < li_total_channels)
-			       && ((r == 0)
-				   || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
-				   || (!li_config_table[j].adapter->li_pri
-				       && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
-				   || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
-					|| (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
-				       && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
-					   || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
-				   || ((li_config_table[j].adapter->li_base != a->li_base)
-				       && !(r & s &
-					    ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
-					     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
-					    ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
-					     (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))))
-			{
-				j++;
-				if (j < li_total_channels)
-					r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-			}
-		}
-		if (j < li_total_channels)
-		{
-			plci->internal_command = plci->li_write_command;
-			if (plci_nl_busy(plci))
-				return (true);
-			to_ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
-			*(p++) = UDATA_REQUEST_XCONNECT_TO;
-			do
-			{
-				if (li_config_table[j].adapter->li_base != a->li_base)
-				{
-					r &= s &
-						((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
-						 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
-						((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
-						 (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC));
-				}
-				n = 0;
-				do
-				{
-					if (r & xconnect_write_prog[n].mask)
-					{
-						if (xconnect_write_prog[n].from_pc)
-							transfer_address = &(li_config_table[j].send_pc);
-						else
-							transfer_address = &(li_config_table[j].send_b);
-						d = transfer_address->card_address.low;
-						*(p++) = (byte) d;
-						*(p++) = (byte)(d >> 8);
-						*(p++) = (byte)(d >> 16);
-						*(p++) = (byte)(d >> 24);
-						d = transfer_address->card_address.high;
-						*(p++) = (byte) d;
-						*(p++) = (byte)(d >> 8);
-						*(p++) = (byte)(d >> 16);
-						*(p++) = (byte)(d >> 24);
-						d = transfer_address->offset;
-						*(p++) = (byte) d;
-						*(p++) = (byte)(d >> 8);
-						*(p++) = (byte)(d >> 16);
-						*(p++) = (byte)(d >> 24);
-						w = xconnect_write_prog[n].to_pc ? to_ch | XCONNECT_CHANNEL_PORT_PC : to_ch;
-						*(p++) = (byte) w;
-						*(p++) = (byte)(w >> 8);
-						w = ((li_config_table[i].coef_table[j] & xconnect_write_prog[n].mask) == 0) ? 0x01 :
-							(li_config_table[i].adapter->u_law ?
-							 (li_config_table[j].adapter->u_law ? 0x80 : 0x86) :
-							 (li_config_table[j].adapter->u_law ? 0x7a : 0x80));
-						*(p++) = (byte) w;
-						*(p++) = (byte) 0;
-						li_config_table[i].coef_table[j] ^= xconnect_write_prog[n].mask << 4;
-					}
-					n++;
-				} while ((n < ARRAY_SIZE(xconnect_write_prog))
-					 && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
-				if (n == ARRAY_SIZE(xconnect_write_prog))
-				{
-					do
-					{
-						j++;
-						if (j < li_total_channels)
-							r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-					} while ((j < li_total_channels)
-						 && ((r == 0)
-						     || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
-						     || (!li_config_table[j].adapter->li_pri
-							 && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
-						     || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
-							  || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
-							 && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
-							     || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
-						     || ((li_config_table[j].adapter->li_base != a->li_base)
-							 && !(r & s &
-							      ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
-							       (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
-							      ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
-							       (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))));
-				}
-			} while ((j < li_total_channels)
-				 && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
-		}
-		else if (j == li_total_channels)
-		{
-			plci->internal_command = plci->li_write_command;
-			if (plci_nl_busy(plci))
-				return (true);
-			if (a->li_pri)
-			{
-				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
-				w = 0;
-				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
-					w |= MIXER_FEATURE_ENABLE_TX_DATA;
-				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
-					w |= MIXER_FEATURE_ENABLE_RX_DATA;
-				*(p++) = (byte) w;
-				*(p++) = (byte)(w >> 8);
-			}
-			else
-			{
-				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
-				w = 0;
-				if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
-				    && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
-				{
-					w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
-				}
-				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
-					w |= MIXER_FEATURE_ENABLE_TX_DATA;
-				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
-					w |= MIXER_FEATURE_ENABLE_RX_DATA;
-				*(p++) = (byte) w;
-				*(p++) = (byte)(w >> 8);
-				for (j = 0; j < sizeof(ch_map); j += 2)
-				{
-					if (plci->li_bchannel_id == 2)
-					{
-						ch_map[j] = (byte)(j + 1);
-						ch_map[j + 1] = (byte) j;
-					}
-					else
-					{
-						ch_map[j] = (byte) j;
-						ch_map[j + 1] = (byte)(j + 1);
-					}
-				}
-				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
-				{
-					i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
-					j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
-					if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
-					{
-						*p = (mixer_write_prog_bri[n].xconnect_override != 0) ?
-							mixer_write_prog_bri[n].xconnect_override :
-							((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
-						if ((i >= a->li_base + MIXER_BCHANNELS_BRI) || (j >= a->li_base + MIXER_BCHANNELS_BRI))
-						{
-							w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-							li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
-						}
-					}
-					else
-					{
-						*p = 0x00;
-						if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
-						{
-							w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
-							if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
-								*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
-						}
-					}
-					p++;
-				}
-			}
-			j = li_total_channels + 1;
-		}
-	}
-	else
-	{
-		if (j <= li_total_channels)
-		{
-			plci->internal_command = plci->li_write_command;
-			if (plci_nl_busy(plci))
-				return (true);
-			if (j < a->li_base)
-				j = a->li_base;
-			if (a->li_pri)
-			{
-				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
-				w = 0;
-				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
-					w |= MIXER_FEATURE_ENABLE_TX_DATA;
-				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
-					w |= MIXER_FEATURE_ENABLE_RX_DATA;
-				*(p++) = (byte) w;
-				*(p++) = (byte)(w >> 8);
-				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_pri); n++)
-				{
-					*(p++) = (byte)((plci->li_bchannel_id - 1) | mixer_write_prog_pri[n].line_flags);
-					for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
-					{
-						w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-						if (w & mixer_write_prog_pri[n].mask)
-						{
-							*(p++) = (li_config_table[i].coef_table[j] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
-							li_config_table[i].coef_table[j] ^= mixer_write_prog_pri[n].mask << 4;
-						}
-						else
-							*(p++) = 0x00;
-					}
-					*(p++) = (byte)((plci->li_bchannel_id - 1) | MIXER_COEF_LINE_ROW_FLAG | mixer_write_prog_pri[n].line_flags);
-					for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
-					{
-						w = ((li_config_table[j].coef_table[i] & 0xf) ^ (li_config_table[j].coef_table[i] >> 4));
-						if (w & mixer_write_prog_pri[n].mask)
-						{
-							*(p++) = (li_config_table[j].coef_table[i] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
-							li_config_table[j].coef_table[i] ^= mixer_write_prog_pri[n].mask << 4;
-						}
-						else
-							*(p++) = 0x00;
-					}
-				}
-			}
-			else
-			{
-				*(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
-				w = 0;
-				if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
-				    && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
-				{
-					w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
-				}
-				if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
-					w |= MIXER_FEATURE_ENABLE_TX_DATA;
-				if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
-					w |= MIXER_FEATURE_ENABLE_RX_DATA;
-				*(p++) = (byte) w;
-				*(p++) = (byte)(w >> 8);
-				for (j = 0; j < sizeof(ch_map); j += 2)
-				{
-					if (plci->li_bchannel_id == 2)
-					{
-						ch_map[j] = (byte)(j + 1);
-						ch_map[j + 1] = (byte) j;
-					}
-					else
-					{
-						ch_map[j] = (byte) j;
-						ch_map[j + 1] = (byte)(j + 1);
-					}
-				}
-				for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
-				{
-					i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
-					j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
-					if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
-					{
-						*p = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
-						w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-						li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
-					}
-					else
-					{
-						*p = 0x00;
-						if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
-						{
-							w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
-							if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
-								*p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
-						}
-					}
-					p++;
-				}
-			}
-			j = li_total_channels + 1;
-		}
-	}
-	plci->li_write_channel = j;
-	if (p != plci->internal_req_buffer)
-	{
-		plci->NData[0].P = plci->internal_req_buffer;
-		plci->NData[0].PLength = p - plci->internal_req_buffer;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-		plci->adapter->request(&plci->NL);
-	}
-	return (true);
-}
-
-
-static void mixer_notify_update(PLCI *plci, byte others)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word i, w;
-	PLCI *notify_plci;
-	byte msg[sizeof(CAPI_MSG_HEADER) + 6];
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_notify_update %d",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, others));
-
-	a = plci->adapter;
-	if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
-	{
-		if (others)
-			plci->li_notify_update = true;
-		i = 0;
-		do
-		{
-			notify_plci = NULL;
-			if (others)
-			{
-				while ((i < li_total_channels) && (li_config_table[i].plci == NULL))
-					i++;
-				if (i < li_total_channels)
-					notify_plci = li_config_table[i++].plci;
-			}
-			else
-			{
-				if ((plci->li_bchannel_id != 0)
-				    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-				{
-					notify_plci = plci;
-				}
-			}
-			if ((notify_plci != NULL)
-			    && !notify_plci->li_notify_update
-			    && (notify_plci->appl != NULL)
-			    && (notify_plci->State)
-			    && notify_plci->NL.Id && !notify_plci->nl_remove_id)
-			{
-				notify_plci->li_notify_update = true;
-				((CAPI_MSG *) msg)->header.length = 18;
-				((CAPI_MSG *) msg)->header.appl_id = notify_plci->appl->Id;
-				((CAPI_MSG *) msg)->header.command = _FACILITY_R;
-				((CAPI_MSG *) msg)->header.number = 0;
-				((CAPI_MSG *) msg)->header.controller = notify_plci->adapter->Id;
-				((CAPI_MSG *) msg)->header.plci = notify_plci->Id;
-				((CAPI_MSG *) msg)->header.ncci = 0;
-				((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
-				((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
-				((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
-				((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
-				((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
-				w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
-				if (w != _QUEUE_FULL)
-				{
-					if (w != 0)
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: Interconnect notify failed %06x %d",
-								(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-								(char *)(FILE_), __LINE__,
-								(dword)((notify_plci->Id << 8) | UnMapController(notify_plci->adapter->Id)), w));
-					}
-					notify_plci->li_notify_update = false;
-				}
-			}
-		} while (others && (notify_plci != NULL));
-		if (others)
-			plci->li_notify_update = false;
-	}
-}
-
-
-static void mixer_clear_config(PLCI *plci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word i, j;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->li_notify_update = false;
-	plci->li_plci_b_write_pos = 0;
-	plci->li_plci_b_read_pos = 0;
-	plci->li_plci_b_req_pos = 0;
-	a = plci->adapter;
-	if ((plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		i = a->li_base + (plci->li_bchannel_id - 1);
-		li_config_table[i].curchnl = 0;
-		li_config_table[i].channel = 0;
-		li_config_table[i].chflags = 0;
-		for (j = 0; j < li_total_channels; j++)
-		{
-			li_config_table[j].flag_table[i] = 0;
-			li_config_table[i].flag_table[j] = 0;
-			li_config_table[i].coef_table[j] = 0;
-			li_config_table[j].coef_table[i] = 0;
-		}
-		if (!a->li_pri)
-		{
-			li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
-			if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
-			{
-				i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
-				li_config_table[i].curchnl = 0;
-				li_config_table[i].channel = 0;
-				li_config_table[i].chflags = 0;
-				for (j = 0; j < li_total_channels; j++)
-				{
-					li_config_table[i].flag_table[j] = 0;
-					li_config_table[j].flag_table[i] = 0;
-					li_config_table[i].coef_table[j] = 0;
-					li_config_table[j].coef_table[i] = 0;
-				}
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-				{
-					i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
-					li_config_table[i].curchnl = 0;
-					li_config_table[i].channel = 0;
-					li_config_table[i].chflags = 0;
-					for (j = 0; j < li_total_channels; j++)
-					{
-						li_config_table[i].flag_table[j] = 0;
-						li_config_table[j].flag_table[i] = 0;
-						li_config_table[i].coef_table[j] = 0;
-						li_config_table[j].coef_table[i] = 0;
-					}
-				}
-			}
-		}
-	}
-}
-
-
-static void mixer_prepare_switch(dword Id, PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_prepare_switch",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	do
-	{
-		mixer_indication_coefs_set(Id, plci);
-	} while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
-}
-
-
-static word mixer_save_config(dword Id, PLCI *plci, byte Rc)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word i, j;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_save_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	a = plci->adapter;
-	if ((plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		i = a->li_base + (plci->li_bchannel_id - 1);
-		for (j = 0; j < li_total_channels; j++)
-		{
-			li_config_table[i].coef_table[j] &= 0xf;
-			li_config_table[j].coef_table[i] &= 0xf;
-		}
-		if (!a->li_pri)
-			li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
-	}
-	return (GOOD);
-}
-
-
-static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word Info;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_restore_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	a = plci->adapter;
-	if ((plci->B1_facilities & B1_FACILITY_MIXER)
-	    && (plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		switch (plci->adjust_b_state)
-		{
-		case ADJUST_B_RESTORE_MIXER_1:
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-			{
-				plci->internal_command = plci->adjust_b_command;
-				if (plci_nl_busy(plci))
-				{
-					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
-					break;
-				}
-				xconnect_query_addresses(plci);
-				plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_2;
-				break;
-			}
-			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
-			Rc = OK;
-			/* fall through */
-		case ADJUST_B_RESTORE_MIXER_2:
-		case ADJUST_B_RESTORE_MIXER_3:
-		case ADJUST_B_RESTORE_MIXER_4:
-			if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Adjust B query addresses failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _WRONG_STATE;
-				break;
-			}
-			if (Rc == OK)
-			{
-				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
-					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_3;
-				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)
-					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
-			}
-			else if (Rc == 0)
-			{
-				if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
-					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_4;
-				else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
-					plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
-			}
-			if (plci->adjust_b_state != ADJUST_B_RESTORE_MIXER_5)
-			{
-				plci->internal_command = plci->adjust_b_command;
-				break;
-			}
-			/* fall through */
-		case ADJUST_B_RESTORE_MIXER_5:
-			xconnect_write_coefs(plci, plci->adjust_b_command);
-			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6;
-			Rc = OK;
-			/* fall through */
-		case ADJUST_B_RESTORE_MIXER_6:
-			if (!xconnect_write_coefs_process(Id, plci, Rc))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				break;
-			plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_7;
-		case ADJUST_B_RESTORE_MIXER_7:
-			break;
-		}
-	}
-	return (Info);
-}
-
-
-static void mixer_command(dword Id, PLCI *plci, byte Rc)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word i, internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_command %02x %04x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
-			plci->li_cmd));
-
-	a = plci->adapter;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (plci->li_cmd)
-	{
-	case LI_REQ_CONNECT:
-	case LI_REQ_DISCONNECT:
-	case LI_REQ_SILENT_UPDATE:
-		switch (internal_command)
-		{
-		default:
-			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
-			{
-				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
-									  B1_FACILITY_MIXER), MIXER_COMMAND_1);
-			}
-			/* fall through */
-		case MIXER_COMMAND_1:
-			if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
-			{
-				if (adjust_b_process(Id, plci, Rc) != GOOD)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Load mixer failed",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					break;
-				}
-				if (plci->internal_command)
-					return;
-			}
-			plci->li_plci_b_req_pos = plci->li_plci_b_write_pos;
-			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
-			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
-				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
-										      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
-			{
-				xconnect_write_coefs(plci, MIXER_COMMAND_2);
-			}
-			else
-			{
-				do
-				{
-					mixer_indication_coefs_set(Id, plci);
-				} while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
-			}
-			/* fall through */
-		case MIXER_COMMAND_2:
-			if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
-			    || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
-				&& (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
-										      ~B1_FACILITY_MIXER)) == plci->B1_resource)))
-			{
-				if (!xconnect_write_coefs_process(Id, plci, Rc))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					if (plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
-					{
-						do
-						{
-							plci->li_plci_b_write_pos = (plci->li_plci_b_write_pos == 0) ?
-								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
-							i = (plci->li_plci_b_write_pos == 0) ?
-								LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
-						} while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
-							 && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG));
-					}
-					break;
-				}
-				if (plci->internal_command)
-					return;
-			}
-			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
-			{
-				adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
-									  ~B1_FACILITY_MIXER), MIXER_COMMAND_3);
-			}
-			/* fall through */
-		case MIXER_COMMAND_3:
-			if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
-			{
-				if (adjust_b_process(Id, plci, Rc) != GOOD)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Unload mixer failed",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					break;
-				}
-				if (plci->internal_command)
-					return;
-			}
-			break;
-		}
-		break;
-	}
-	if ((plci->li_bchannel_id == 0)
-	    || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
-	{
-		dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out %d",
-				UnMapId(Id), (char *)(FILE_), __LINE__, (int)(plci->li_bchannel_id)));
-	}
-	else
-	{
-		i = a->li_base + (plci->li_bchannel_id - 1);
-		li_config_table[i].curchnl = plci->li_channel_bits;
-		if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
-		{
-			i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
-			li_config_table[i].curchnl = plci->li_channel_bits;
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-			{
-				i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
-				li_config_table[i].curchnl = plci->li_channel_bits;
-			}
-		}
-	}
-}
-
-
-static void li_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
-			      dword plci_b_id, byte connect, dword li_flags)
-{
-	word i, ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
-	PLCI *plci_b;
-	DIVA_CAPI_ADAPTER *a_b;
-
-	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
-	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
-	ch_a = a->li_base + (plci->li_bchannel_id - 1);
-	if (!a->li_pri && (plci->tel == ADV_VOICE)
-	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
-	{
-		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
-		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
-			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
-	}
-	else
-	{
-		ch_a_v = ch_a;
-		ch_a_s = ch_a;
-	}
-	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
-	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
-	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
-	{
-		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
-		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
-			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
-	}
-	else
-	{
-		ch_b_v = ch_b;
-		ch_b_s = ch_b;
-	}
-	if (connect)
-	{
-		li_config_table[ch_a].flag_table[ch_a_v] &= ~LI_FLAG_MONITOR;
-		li_config_table[ch_a].flag_table[ch_a_s] &= ~LI_FLAG_MONITOR;
-		li_config_table[ch_a_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
-		li_config_table[ch_a_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
-	}
-	li_config_table[ch_a].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
-	li_config_table[ch_a].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
-	li_config_table[ch_b_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
-	li_config_table[ch_b_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
-	if (ch_a_v == ch_b_v)
-	{
-		li_config_table[ch_a_v].flag_table[ch_b_v] &= ~LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_s].flag_table[ch_b_s] &= ~LI_FLAG_CONFERENCE;
-	}
-	else
-	{
-		if (li_config_table[ch_a_v].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
-		{
-			for (i = 0; i < li_total_channels; i++)
-			{
-				if (i != ch_a_v)
-					li_config_table[ch_a_v].flag_table[i] &= ~LI_FLAG_CONFERENCE;
-			}
-		}
-		if (li_config_table[ch_a_s].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
-		{
-			for (i = 0; i < li_total_channels; i++)
-			{
-				if (i != ch_a_s)
-					li_config_table[ch_a_s].flag_table[i] &= ~LI_FLAG_CONFERENCE;
-			}
-		}
-		if (li_config_table[ch_b_v].flag_table[ch_a_v] & LI_FLAG_CONFERENCE)
-		{
-			for (i = 0; i < li_total_channels; i++)
-			{
-				if (i != ch_a_v)
-					li_config_table[i].flag_table[ch_a_v] &= ~LI_FLAG_CONFERENCE;
-			}
-		}
-		if (li_config_table[ch_b_v].flag_table[ch_a_s] & LI_FLAG_CONFERENCE)
-		{
-			for (i = 0; i < li_total_channels; i++)
-			{
-				if (i != ch_a_s)
-					li_config_table[i].flag_table[ch_a_s] &= ~LI_FLAG_CONFERENCE;
-			}
-		}
-	}
-	if (li_flags & LI_FLAG_CONFERENCE_A_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
-	}
-	if (li_flags & LI_FLAG_CONFERENCE_B_A)
-	{
-		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
-	}
-	if (li_flags & LI_FLAG_MONITOR_A)
-	{
-		li_config_table[ch_a].flag_table[ch_a_v] |= LI_FLAG_MONITOR;
-		li_config_table[ch_a].flag_table[ch_a_s] |= LI_FLAG_MONITOR;
-	}
-	if (li_flags & LI_FLAG_MONITOR_B)
-	{
-		li_config_table[ch_a].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
-		li_config_table[ch_a].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
-	}
-	if (li_flags & LI_FLAG_ANNOUNCEMENT_A)
-	{
-		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
-		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
-	}
-	if (li_flags & LI_FLAG_ANNOUNCEMENT_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
-		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
-	}
-	if (li_flags & LI_FLAG_MIX_A)
-	{
-		li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_MIX;
-		li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_MIX;
-	}
-	if (li_flags & LI_FLAG_MIX_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_MIX;
-		li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_MIX;
-	}
-	if (ch_a_v != ch_a_s)
-	{
-		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
-	}
-	if (ch_b_v != ch_b_s)
-	{
-		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
-	}
-}
-
-
-static void li2_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
-			       dword plci_b_id, byte connect, dword li_flags)
-{
-	word ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
-	PLCI *plci_b;
-	DIVA_CAPI_ADAPTER *a_b;
-
-	a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
-	plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
-	ch_a = a->li_base + (plci->li_bchannel_id - 1);
-	if (!a->li_pri && (plci->tel == ADV_VOICE)
-	    && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
-	{
-		ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
-		ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
-			a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
-	}
-	else
-	{
-		ch_a_v = ch_a;
-		ch_a_s = ch_a;
-	}
-	ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
-	if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
-	    && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
-	{
-		ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
-		ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
-			a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
-	}
-	else
-	{
-		ch_b_v = ch_b;
-		ch_b_s = ch_b;
-	}
-	if (connect)
-	{
-		li_config_table[ch_b].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
-		li_config_table[ch_b].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
-		li_config_table[ch_b_v].flag_table[ch_b] &= ~LI_FLAG_MIX;
-		li_config_table[ch_b_s].flag_table[ch_b] &= ~LI_FLAG_MIX;
-		li_config_table[ch_b].flag_table[ch_b] &= ~LI_FLAG_PCCONNECT;
-		li_config_table[ch_b].chflags &= ~(LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP);
-	}
-	li_config_table[ch_b_v].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_b_s].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_b_v].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_b_s].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_a_v].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_a_v].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_a_s].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	li_config_table[ch_a_s].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
-	if (li_flags & LI2_FLAG_INTERCONNECT_A_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
-	}
-	if (li_flags & LI2_FLAG_INTERCONNECT_B_A)
-	{
-		li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
-	}
-	if (li_flags & LI2_FLAG_MONITOR_B)
-	{
-		li_config_table[ch_b].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
-		li_config_table[ch_b].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
-	}
-	if (li_flags & LI2_FLAG_MIX_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_b] |= LI_FLAG_MIX;
-		li_config_table[ch_b_s].flag_table[ch_b] |= LI_FLAG_MIX;
-	}
-	if (li_flags & LI2_FLAG_MONITOR_X)
-		li_config_table[ch_b].chflags |= LI_CHFLAG_MONITOR;
-	if (li_flags & LI2_FLAG_MIX_X)
-		li_config_table[ch_b].chflags |= LI_CHFLAG_MIX;
-	if (li_flags & LI2_FLAG_LOOP_B)
-	{
-		li_config_table[ch_b_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
-		li_config_table[ch_b_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
-	}
-	if (li_flags & LI2_FLAG_LOOP_PC)
-		li_config_table[ch_b].flag_table[ch_b] |= LI_FLAG_PCCONNECT;
-	if (li_flags & LI2_FLAG_LOOP_X)
-		li_config_table[ch_b].chflags |= LI_CHFLAG_LOOP;
-	if (li_flags & LI2_FLAG_PCCONNECT_A_B)
-		li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_PCCONNECT;
-	if (li_flags & LI2_FLAG_PCCONNECT_B_A)
-		li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_PCCONNECT;
-	if (ch_a_v != ch_a_s)
-	{
-		li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
-	}
-	if (ch_b_v != ch_b_s)
-	{
-		li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
-		li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
-	}
-}
-
-
-static word li_check_main_plci(dword Id, PLCI *plci)
-{
-	if (plci == NULL)
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		return (_WRONG_IDENTIFIER);
-	}
-	if (!plci->State
-	    || !plci->NL.Id || plci->nl_remove_id
-	    || (plci->li_bchannel_id == 0))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		return (_WRONG_STATE);
-	}
-	li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = plci;
-	return (GOOD);
-}
-
-
-static PLCI *li_check_plci_b(dword Id, PLCI *plci,
-			     dword plci_b_id, word plci_b_write_pos, byte *p_result)
-{
-	byte ctlr_b;
-	PLCI *plci_b;
-
-	if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
-	     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
-		return (NULL);
-	}
-	ctlr_b = 0;
-	if ((plci_b_id & 0x7f) != 0)
-	{
-		ctlr_b = MapController((byte)(plci_b_id & 0x7f));
-		if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
-			ctlr_b = 0;
-	}
-	if ((ctlr_b == 0)
-	    || (((plci_b_id >> 8) & 0xff) == 0)
-	    || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _WRONG_IDENTIFIER);
-		return (NULL);
-	}
-	plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
-	if (!plci_b->State
-	    || !plci_b->NL.Id || plci_b->nl_remove_id
-	    || (plci_b->li_bchannel_id == 0))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
-		return (NULL);
-	}
-	li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci = plci_b;
-	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
-	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
-	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-		|| !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _WRONG_IDENTIFIER);
-		return (NULL);
-	}
-	if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
-							  (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
-		PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
-		return (NULL);
-	}
-	return (plci_b);
-}
-
-
-static PLCI *li2_check_plci_b(dword Id, PLCI *plci,
-			      dword plci_b_id, word plci_b_write_pos, byte *p_result)
-{
-	byte ctlr_b;
-	PLCI *plci_b;
-
-	if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
-	     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		PUT_WORD(p_result, _WRONG_STATE);
-		return (NULL);
-	}
-	ctlr_b = 0;
-	if ((plci_b_id & 0x7f) != 0)
-	{
-		ctlr_b = MapController((byte)(plci_b_id & 0x7f));
-		if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
-			ctlr_b = 0;
-	}
-	if ((ctlr_b == 0)
-	    || (((plci_b_id >> 8) & 0xff) == 0)
-	    || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _WRONG_IDENTIFIER);
-		return (NULL);
-	}
-	plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
-	if (!plci_b->State
-	    || !plci_b->NL.Id || plci_b->nl_remove_id
-	    || (plci_b->li_bchannel_id == 0)
-	    || (li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci != plci_b))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _WRONG_STATE);
-		return (NULL);
-	}
-	if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
-	    ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
-	    && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-		|| !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
-		PUT_WORD(p_result, _WRONG_IDENTIFIER);
-		return (NULL);
-	}
-	if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
-							  (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
-				UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
-		PUT_WORD(p_result, _WRONG_STATE);
-		return (NULL);
-	}
-	return (plci_b);
-}
-
-
-static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL   *appl, API_PARSE *msg)
-{
-	word Info;
-	word i;
-	dword d, li_flags, plci_b_id;
-	PLCI *plci_b;
-	API_PARSE li_parms[3];
-	API_PARSE li_req_parms[3];
-	API_PARSE li_participant_struct[2];
-	API_PARSE li_participant_parms[3];
-	word participant_parms_pos;
-	byte result_buffer[32];
-	byte *result;
-	word result_pos;
-	word plci_b_write_pos;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_request",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	Info = GOOD;
-	result = result_buffer;
-	result_buffer[0] = 0;
-	if (!(a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _FACILITY_NOT_SUPPORTED;
-	}
-	else if (api_parse(&msg[1].info[1], msg[1].length, "ws", li_parms))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _WRONG_MESSAGE_FORMAT;
-	}
-	else
-	{
-		result_buffer[0] = 3;
-		PUT_WORD(&result_buffer[1], GET_WORD(li_parms[0].info));
-		result_buffer[3] = 0;
-		switch (GET_WORD(li_parms[0].info))
-		{
-		case LI_GET_SUPPORTED_SERVICES:
-			if (appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
-			{
-				result_buffer[0] = 17;
-				result_buffer[3] = 14;
-				PUT_WORD(&result_buffer[4], GOOD);
-				d = 0;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_CH)
-					d |= LI_CONFERENCING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
-					d |= LI_MONITORING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
-					d |= LI_ANNOUNCEMENTS_SUPPORTED | LI_MIXING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-					d |= LI_CROSS_CONTROLLER_SUPPORTED;
-				PUT_DWORD(&result_buffer[6], d);
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-				{
-					d = 0;
-					for (i = 0; i < li_total_channels; i++)
-					{
-						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-						    && (li_config_table[i].adapter->li_pri
-							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
-						{
-							d++;
-						}
-					}
-				}
-				else
-				{
-					d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
-				}
-				PUT_DWORD(&result_buffer[10], d / 2);
-				PUT_DWORD(&result_buffer[14], d);
-			}
-			else
-			{
-				result_buffer[0] = 25;
-				result_buffer[3] = 22;
-				PUT_WORD(&result_buffer[4], GOOD);
-				d = LI2_ASYMMETRIC_SUPPORTED | LI2_B_LOOPING_SUPPORTED | LI2_X_LOOPING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
-					d |= LI2_MONITORING_SUPPORTED | LI2_REMOTE_MONITORING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
-					d |= LI2_MIXING_SUPPORTED | LI2_REMOTE_MIXING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_PC)
-					d |= LI2_PC_LOOPING_SUPPORTED;
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-					d |= LI2_CROSS_CONTROLLER_SUPPORTED;
-				PUT_DWORD(&result_buffer[6], d);
-				d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
-				PUT_DWORD(&result_buffer[10], d / 2);
-				PUT_DWORD(&result_buffer[14], d - 1);
-				if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-				{
-					d = 0;
-					for (i = 0; i < li_total_channels; i++)
-					{
-						if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
-						    && (li_config_table[i].adapter->li_pri
-							|| (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
-						{
-							d++;
-						}
-					}
-				}
-				PUT_DWORD(&result_buffer[18], d / 2);
-				PUT_DWORD(&result_buffer[22], d - 1);
-			}
-			break;
-
-		case LI_REQ_CONNECT:
-			if (li_parms[1].length == 8)
-			{
-				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
-				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "dd", li_req_parms))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
-				li_flags = GET_DWORD(li_req_parms[1].info);
-				Info = li_check_main_plci(Id, plci);
-				result_buffer[0] = 9;
-				result_buffer[3] = 6;
-				PUT_DWORD(&result_buffer[4], plci_b_id);
-				PUT_WORD(&result_buffer[8], GOOD);
-				if (Info != GOOD)
-					break;
-				result = plci->saved_msg.info;
-				for (i = 0; i <= result_buffer[0]; i++)
-					result[i] = result_buffer[i];
-				plci_b_write_pos = plci->li_plci_b_write_pos;
-				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
-				if (plci_b == NULL)
-					break;
-				li_update_connect(Id, a, plci, plci_b_id, true, li_flags);
-				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_LAST_FLAG;
-				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-				plci->li_plci_b_write_pos = plci_b_write_pos;
-			}
-			else
-			{
-				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
-				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "ds", li_req_parms))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				li_flags = GET_DWORD(li_req_parms[0].info) & ~(LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A);
-				Info = li_check_main_plci(Id, plci);
-				result_buffer[0] = 7;
-				result_buffer[3] = 4;
-				PUT_WORD(&result_buffer[4], Info);
-				result_buffer[6] = 0;
-				if (Info != GOOD)
-					break;
-				result = plci->saved_msg.info;
-				for (i = 0; i <= result_buffer[0]; i++)
-					result[i] = result_buffer[i];
-				plci_b_write_pos = plci->li_plci_b_write_pos;
-				participant_parms_pos = 0;
-				result_pos = 7;
-				li2_update_connect(Id, a, plci, UnMapId(Id), true, li_flags);
-				while (participant_parms_pos < li_req_parms[1].length)
-				{
-					result[result_pos] = 6;
-					result_pos += 7;
-					PUT_DWORD(&result[result_pos - 6], 0);
-					PUT_WORD(&result[result_pos - 2], GOOD);
-					if (api_parse(&li_req_parms[1].info[1 + participant_parms_pos],
-						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
-						break;
-					}
-					if (api_parse(&li_participant_struct[0].info[1],
-						      li_participant_struct[0].length, "dd", li_participant_parms))
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
-						break;
-					}
-					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
-					li_flags = GET_DWORD(li_participant_parms[1].info);
-					PUT_DWORD(&result[result_pos - 6], plci_b_id);
-					if (sizeof(result) - result_pos < 7)
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
-						break;
-					}
-					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
-					if (plci_b != NULL)
-					{
-						li2_update_connect(Id, a, plci, plci_b_id, true, li_flags);
-						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id |
-							((li_flags & (LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A |
-								      LI2_FLAG_PCCONNECT_A_B | LI2_FLAG_PCCONNECT_B_A)) ? 0 : LI_PLCI_B_DISC_FLAG);
-						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-					}
-					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
-								       (&li_req_parms[1].info[1]));
-				}
-				result[0] = (byte)(result_pos - 1);
-				result[3] = (byte)(result_pos - 4);
-				result[6] = (byte)(result_pos - 7);
-				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
-				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
-				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
-				{
-					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
-					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-				}
-				else
-					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
-				plci->li_plci_b_write_pos = plci_b_write_pos;
-			}
-			mixer_calculate_coefs(a);
-			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
-			mixer_notify_update(plci, true);
-			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
-			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
-			plci->command = 0;
-			plci->li_cmd = GET_WORD(li_parms[0].info);
-			start_internal_command(Id, plci, mixer_command);
-			return (false);
-
-		case LI_REQ_DISCONNECT:
-			if (li_parms[1].length == 4)
-			{
-				appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
-				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "d", li_req_parms))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
-				Info = li_check_main_plci(Id, plci);
-				result_buffer[0] = 9;
-				result_buffer[3] = 6;
-				PUT_DWORD(&result_buffer[4], GET_DWORD(li_req_parms[0].info));
-				PUT_WORD(&result_buffer[8], GOOD);
-				if (Info != GOOD)
-					break;
-				result = plci->saved_msg.info;
-				for (i = 0; i <= result_buffer[0]; i++)
-					result[i] = result_buffer[i];
-				plci_b_write_pos = plci->li_plci_b_write_pos;
-				plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
-				if (plci_b == NULL)
-					break;
-				li_update_connect(Id, a, plci, plci_b_id, false, 0);
-				plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_LAST_FLAG;
-				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-				plci->li_plci_b_write_pos = plci_b_write_pos;
-			}
-			else
-			{
-				appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
-				if (api_parse(&li_parms[1].info[1], li_parms[1].length, "s", li_req_parms))
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_MESSAGE_FORMAT;
-					break;
-				}
-				Info = li_check_main_plci(Id, plci);
-				result_buffer[0] = 7;
-				result_buffer[3] = 4;
-				PUT_WORD(&result_buffer[4], Info);
-				result_buffer[6] = 0;
-				if (Info != GOOD)
-					break;
-				result = plci->saved_msg.info;
-				for (i = 0; i <= result_buffer[0]; i++)
-					result[i] = result_buffer[i];
-				plci_b_write_pos = plci->li_plci_b_write_pos;
-				participant_parms_pos = 0;
-				result_pos = 7;
-				while (participant_parms_pos < li_req_parms[0].length)
-				{
-					result[result_pos] = 6;
-					result_pos += 7;
-					PUT_DWORD(&result[result_pos - 6], 0);
-					PUT_WORD(&result[result_pos - 2], GOOD);
-					if (api_parse(&li_req_parms[0].info[1 + participant_parms_pos],
-						      (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
-						break;
-					}
-					if (api_parse(&li_participant_struct[0].info[1],
-						      li_participant_struct[0].length, "d", li_participant_parms))
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
-						break;
-					}
-					plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
-					PUT_DWORD(&result[result_pos - 6], plci_b_id);
-					if (sizeof(result) - result_pos < 7)
-					{
-						dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
-								UnMapId(Id), (char *)(FILE_), __LINE__));
-						PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
-						break;
-					}
-					plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
-					if (plci_b != NULL)
-					{
-						li2_update_connect(Id, a, plci, plci_b_id, false, 0);
-						plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
-						plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-					}
-					participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
-								       (&li_req_parms[0].info[1]));
-				}
-				result[0] = (byte)(result_pos - 1);
-				result[3] = (byte)(result_pos - 4);
-				result[6] = (byte)(result_pos - 7);
-				i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
-				if ((plci_b_write_pos == plci->li_plci_b_read_pos)
-				    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
-				{
-					plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
-					plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-				}
-				else
-					plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
-				plci->li_plci_b_write_pos = plci_b_write_pos;
-			}
-			mixer_calculate_coefs(a);
-			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
-			mixer_notify_update(plci, true);
-			sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
-			      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
-			plci->command = 0;
-			plci->li_cmd = GET_WORD(li_parms[0].info);
-			start_internal_command(Id, plci, mixer_command);
-			return (false);
-
-		case LI_REQ_SILENT_UPDATE:
-			if (!plci || !plci->State
-			    || !plci->NL.Id || plci->nl_remove_id
-			    || (plci->li_bchannel_id == 0)
-			    || (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				return (false);
-			}
-			plci_b_write_pos = plci->li_plci_b_write_pos;
-			if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
-			     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				return (false);
-			}
-			i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
-			if ((plci_b_write_pos == plci->li_plci_b_read_pos)
-			    || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
-			{
-				plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
-				plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-			}
-			else
-				plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
-			plci->li_plci_b_write_pos = plci_b_write_pos;
-			plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
-			plci->command = 0;
-			plci->li_cmd = GET_WORD(li_parms[0].info);
-			start_internal_command(Id, plci, mixer_command);
-			return (false);
-
-		default:
-			dbug(1, dprintf("[%06lx] %s,%d: LI unknown request %04x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(li_parms[0].info)));
-			Info = _FACILITY_NOT_SUPPORTED;
-		}
-	}
-	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
-	      "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
-	return (false);
-}
-
-
-static void mixer_indication_coefs_set(dword Id, PLCI *plci)
-{
-	dword d;
-	byte result[12];
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_coefs_set",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
-	{
-		do
-		{
-			d = plci->li_plci_b_queue[plci->li_plci_b_read_pos];
-			if (!(d & LI_PLCI_B_SKIP_FLAG))
-			{
-				if (plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
-				{
-					if (d & LI_PLCI_B_DISC_FLAG)
-					{
-						result[0] = 5;
-						PUT_WORD(&result[1], LI_IND_DISCONNECT);
-						result[3] = 2;
-						PUT_WORD(&result[4], _LI_USER_INITIATED);
-					}
-					else
-					{
-						result[0] = 7;
-						PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
-						result[3] = 4;
-						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
-					}
-				}
-				else
-				{
-					if (d & LI_PLCI_B_DISC_FLAG)
-					{
-						result[0] = 9;
-						PUT_WORD(&result[1], LI_IND_DISCONNECT);
-						result[3] = 6;
-						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
-						PUT_WORD(&result[8], _LI_USER_INITIATED);
-					}
-					else
-					{
-						result[0] = 7;
-						PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
-						result[3] = 4;
-						PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
-					}
-				}
-				sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,
-				      "ws", SELECTOR_LINE_INTERCONNECT, result);
-			}
-			plci->li_plci_b_read_pos = (plci->li_plci_b_read_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ?
-				0 : plci->li_plci_b_read_pos + 1;
-		} while (!(d & LI_PLCI_B_LAST_FLAG) && (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos));
-	}
-}
-
-
-static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length)
-{
-	word i, j, ch;
-	struct xconnect_transfer_address_s s,   *p;
-	DIVA_CAPI_ADAPTER *a;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_from %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, (int)length));
-
-	a = plci->adapter;
-	i = 1;
-	for (i = 1; i < length; i += 16)
-	{
-		s.card_address.low = msg[i] | (msg[i + 1] << 8) | (((dword)(msg[i + 2])) << 16) | (((dword)(msg[i + 3])) << 24);
-		s.card_address.high = msg[i + 4] | (msg[i + 5] << 8) | (((dword)(msg[i + 6])) << 16) | (((dword)(msg[i + 7])) << 24);
-		s.offset = msg[i + 8] | (msg[i + 9] << 8) | (((dword)(msg[i + 10])) << 16) | (((dword)(msg[i + 11])) << 24);
-		ch = msg[i + 12] | (msg[i + 13] << 8);
-		j = ch & XCONNECT_CHANNEL_NUMBER_MASK;
-		if (!a->li_pri && (plci->li_bchannel_id == 2))
-			j = 1 - j;
-		j += a->li_base;
-		if (ch & XCONNECT_CHANNEL_PORT_PC)
-			p = &(li_config_table[j].send_pc);
-		else
-			p = &(li_config_table[j].send_b);
-		p->card_address.low = s.card_address.low;
-		p->card_address.high = s.card_address.high;
-		p->offset = s.offset;
-		li_config_table[j].channel |= LI_CHANNEL_ADDRESSES_SET;
-	}
-	if (plci->internal_command_queue[0]
-	    && ((plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
-		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
-		|| (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)))
-	{
-		(*(plci->internal_command_queue[0]))(Id, plci, 0);
-		if (!plci->internal_command)
-			next_internal_command(Id, plci);
-	}
-	mixer_notify_update(plci, true);
-}
-
-
-static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_to %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, (int) length));
-
-}
-
-
-static byte mixer_notify_source_removed(PLCI *plci, dword plci_b_id)
-{
-	word plci_b_write_pos;
-
-	plci_b_write_pos = plci->li_plci_b_write_pos;
-	if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
-	     LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 1)
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
-				(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-				(char *)(FILE_), __LINE__));
-		return (false);
-	}
-	plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
-	plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
-	plci->li_plci_b_write_pos = plci_b_write_pos;
-	return (true);
-}
-
-
-static void mixer_remove(PLCI *plci)
-{
-	DIVA_CAPI_ADAPTER *a;
-	PLCI *notify_plci;
-	dword plci_b_id;
-	word i, j;
-
-	dbug(1, dprintf("[%06lx] %s,%d: mixer_remove",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	a = plci->adapter;
-	plci_b_id = (plci->Id << 8) | UnMapController(plci->adapter->Id);
-	if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
-	{
-		if ((plci->li_bchannel_id != 0)
-		    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-		{
-			i = a->li_base + (plci->li_bchannel_id - 1);
-			if ((li_config_table[i].curchnl | li_config_table[i].channel) & LI_CHANNEL_INVOLVED)
-			{
-				for (j = 0; j < li_total_channels; j++)
-				{
-					if ((li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
-					    || (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT))
-					{
-						notify_plci = li_config_table[j].plci;
-						if ((notify_plci != NULL)
-						    && (notify_plci != plci)
-						    && (notify_plci->appl != NULL)
-						    && !(notify_plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
-						    && (notify_plci->State)
-						    && notify_plci->NL.Id && !notify_plci->nl_remove_id)
-						{
-							mixer_notify_source_removed(notify_plci, plci_b_id);
-						}
-					}
-				}
-				mixer_clear_config(plci);
-				mixer_calculate_coefs(a);
-				mixer_notify_update(plci, true);
-			}
-			li_config_table[i].plci = NULL;
-			plci->li_bchannel_id = 0;
-		}
-	}
-}
-
-
-/*------------------------------------------------------------------*/
-/* Echo canceller facilities                                        */
-/*------------------------------------------------------------------*/
-
-
-static void ec_write_parameters(PLCI *plci)
-{
-	word w;
-	byte parameter_buffer[6];
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_write_parameters",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	parameter_buffer[0] = 5;
-	parameter_buffer[1] = DSP_CTRL_SET_LEC_PARAMETERS;
-	PUT_WORD(&parameter_buffer[2], plci->ec_idi_options);
-	plci->ec_idi_options &= ~LEC_RESET_COEFFICIENTS;
-	w = (plci->ec_tail_length == 0) ? 128 : plci->ec_tail_length;
-	PUT_WORD(&parameter_buffer[4], w);
-	add_p(plci, FTY, parameter_buffer);
-	sig_req(plci, TEL_CTRL, 0);
-	send_req(plci);
-}
-
-
-static void ec_clear_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
-		LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING;
-	plci->ec_tail_length = 0;
-}
-
-
-static void ec_prepare_switch(dword Id, PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_prepare_switch",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word ec_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_save_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	return (GOOD);
-}
-
-
-static word ec_restore_config(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_restore_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	if (plci->B1_facilities & B1_FACILITY_EC)
-	{
-		switch (plci->adjust_b_state)
-		{
-		case ADJUST_B_RESTORE_EC_1:
-			plci->internal_command = plci->adjust_b_command;
-			if (plci->sig_req)
-			{
-				plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
-				break;
-			}
-			ec_write_parameters(plci);
-			plci->adjust_b_state = ADJUST_B_RESTORE_EC_2;
-			break;
-		case ADJUST_B_RESTORE_EC_2:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Restore EC failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _WRONG_STATE;
-				break;
-			}
-			break;
-		}
-	}
-	return (Info);
-}
-
-
-static void ec_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command, Info;
-	byte result[8];
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_command %02x %04x %04x %04x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
-			plci->ec_cmd, plci->ec_idi_options, plci->ec_tail_length));
-
-	Info = GOOD;
-	if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
-	{
-		result[0] = 2;
-		PUT_WORD(&result[1], EC_SUCCESS);
-	}
-	else
-	{
-		result[0] = 5;
-		PUT_WORD(&result[1], plci->ec_cmd);
-		result[3] = 2;
-		PUT_WORD(&result[4], GOOD);
-	}
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (plci->ec_cmd)
-	{
-	case EC_ENABLE_OPERATION:
-	case EC_FREEZE_COEFFICIENTS:
-	case EC_RESUME_COEFFICIENT_UPDATE:
-	case EC_RESET_COEFFICIENTS:
-		switch (internal_command)
-		{
-		default:
-			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
-								  B1_FACILITY_EC), EC_COMMAND_1);
-			/* fall through */
-		case EC_COMMAND_1:
-			if (adjust_b_process(Id, plci, Rc) != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Load EC failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				return;
-			/* fall through */
-		case EC_COMMAND_2:
-			if (plci->sig_req)
-			{
-				plci->internal_command = EC_COMMAND_2;
-				return;
-			}
-			plci->internal_command = EC_COMMAND_3;
-			ec_write_parameters(plci);
-			return;
-		case EC_COMMAND_3:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Enable EC failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			break;
-		}
-		break;
-
-	case EC_DISABLE_OPERATION:
-		switch (internal_command)
-		{
-		default:
-		case EC_COMMAND_1:
-			if (plci->B1_facilities & B1_FACILITY_EC)
-			{
-				if (plci->sig_req)
-				{
-					plci->internal_command = EC_COMMAND_1;
-					return;
-				}
-				plci->internal_command = EC_COMMAND_2;
-				ec_write_parameters(plci);
-				return;
-			}
-			Rc = OK;
-			/* fall through */
-		case EC_COMMAND_2:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Disable EC failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
-								  ~B1_FACILITY_EC), EC_COMMAND_3);
-			/* fall through */
-		case EC_COMMAND_3:
-			if (adjust_b_process(Id, plci, Rc) != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Unload EC failed",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _FACILITY_NOT_SUPPORTED;
-				break;
-			}
-			if (plci->internal_command)
-				return;
-			break;
-		}
-		break;
-	}
-	sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
-	      "wws", Info, (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
-	      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
-}
-
-
-static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL   *appl, API_PARSE *msg)
-{
-	word Info;
-	word opt;
-	API_PARSE ec_parms[3];
-	byte result[16];
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_request",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	Info = GOOD;
-	result[0] = 0;
-	if (!(a->man_profile.private_options & (1L << PRIVATE_ECHO_CANCELLER)))
-	{
-		dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		Info = _FACILITY_NOT_SUPPORTED;
-	}
-	else
-	{
-		if (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
-		{
-			if (api_parse(&msg[1].info[1], msg[1].length, "w", ec_parms))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _WRONG_MESSAGE_FORMAT;
-			}
-			else
-			{
-				if (plci == NULL)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_IDENTIFIER;
-				}
-				else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_STATE;
-				}
-				else
-				{
-					plci->command = 0;
-					plci->ec_cmd = GET_WORD(ec_parms[0].info);
-					plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
-					result[0] = 2;
-					PUT_WORD(&result[1], EC_SUCCESS);
-					if (msg[1].length >= 4)
-					{
-						opt = GET_WORD(&ec_parms[0].info[2]);
-						plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
-									  LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
-						if (!(opt & EC_DISABLE_NON_LINEAR_PROCESSING))
-							plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
-						if (opt & EC_DETECT_DISABLE_TONE)
-							plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
-						if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
-							plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
-						if (msg[1].length >= 6)
-						{
-							plci->ec_tail_length = GET_WORD(&ec_parms[0].info[4]);
-						}
-					}
-					switch (plci->ec_cmd)
-					{
-					case EC_ENABLE_OPERATION:
-						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					case EC_DISABLE_OPERATION:
-						plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
-							LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
-							LEC_RESET_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					case EC_FREEZE_COEFFICIENTS:
-						plci->ec_idi_options |= LEC_FREEZE_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					case EC_RESUME_COEFFICIENT_UPDATE:
-						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					case EC_RESET_COEFFICIENTS:
-						plci->ec_idi_options |= LEC_RESET_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					default:
-						dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
-								UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
-						PUT_WORD(&result[1], EC_UNSUPPORTED_OPERATION);
-					}
-				}
-			}
-		}
-		else
-		{
-			if (api_parse(&msg[1].info[1], msg[1].length, "ws", ec_parms))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
-						UnMapId(Id), (char *)(FILE_), __LINE__));
-				Info = _WRONG_MESSAGE_FORMAT;
-			}
-			else
-			{
-				if (GET_WORD(ec_parms[0].info) == EC_GET_SUPPORTED_SERVICES)
-				{
-					result[0] = 11;
-					PUT_WORD(&result[1], EC_GET_SUPPORTED_SERVICES);
-					result[3] = 8;
-					PUT_WORD(&result[4], GOOD);
-					PUT_WORD(&result[6], 0x0007);
-					PUT_WORD(&result[8], LEC_MAX_SUPPORTED_TAIL_LENGTH);
-					PUT_WORD(&result[10], 0);
-				}
-				else if (plci == NULL)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_IDENTIFIER;
-				}
-				else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
-				{
-					dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
-							UnMapId(Id), (char *)(FILE_), __LINE__));
-					Info = _WRONG_STATE;
-				}
-				else
-				{
-					plci->command = 0;
-					plci->ec_cmd = GET_WORD(ec_parms[0].info);
-					plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
-					result[0] = 5;
-					PUT_WORD(&result[1], plci->ec_cmd);
-					result[3] = 2;
-					PUT_WORD(&result[4], GOOD);
-					plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
-								  LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
-					plci->ec_tail_length = 0;
-					if (ec_parms[1].length >= 2)
-					{
-						opt = GET_WORD(&ec_parms[1].info[1]);
-						if (opt & EC_ENABLE_NON_LINEAR_PROCESSING)
-							plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
-						if (opt & EC_DETECT_DISABLE_TONE)
-							plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
-						if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
-							plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
-						if (ec_parms[1].length >= 4)
-						{
-							plci->ec_tail_length = GET_WORD(&ec_parms[1].info[3]);
-						}
-					}
-					switch (plci->ec_cmd)
-					{
-					case EC_ENABLE_OPERATION:
-						plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					case EC_DISABLE_OPERATION:
-						plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
-							LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
-							LEC_RESET_COEFFICIENTS;
-						start_internal_command(Id, plci, ec_command);
-						return (false);
-
-					default:
-						dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
-								UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
-						PUT_WORD(&result[4], _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP);
-					}
-				}
-			}
-		}
-	}
-	sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
-	      "wws", Info, (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
-	      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
-	return (false);
-}
-
-
-static void ec_indication(dword Id, PLCI *plci, byte *msg, word length)
-{
-	byte result[8];
-
-	dbug(1, dprintf("[%06lx] %s,%d: ec_indication",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-	if (!(plci->ec_idi_options & LEC_MANUAL_DISABLE))
-	{
-		if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
-		{
-			result[0] = 2;
-			PUT_WORD(&result[1], 0);
-			switch (msg[1])
-			{
-			case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
-				PUT_WORD(&result[1], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
-				break;
-			case LEC_DISABLE_TYPE_REVERSED_2100HZ:
-				PUT_WORD(&result[1], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
-				break;
-			case LEC_DISABLE_RELEASED:
-				PUT_WORD(&result[1], EC_BYPASS_RELEASED);
-				break;
-			}
-		}
-		else
-		{
-			result[0] = 5;
-			PUT_WORD(&result[1], EC_BYPASS_INDICATION);
-			result[3] = 2;
-			PUT_WORD(&result[4], 0);
-			switch (msg[1])
-			{
-			case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
-				PUT_WORD(&result[4], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
-				break;
-			case LEC_DISABLE_TYPE_REVERSED_2100HZ:
-				PUT_WORD(&result[4], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
-				break;
-			case LEC_DISABLE_RELEASED:
-				PUT_WORD(&result[4], EC_BYPASS_RELEASED);
-				break;
-			}
-		}
-		sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
-		      PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
-	}
-}
-
-
-
-/*------------------------------------------------------------------*/
-/* Advanced voice                                                   */
-/*------------------------------------------------------------------*/
-
-static void adv_voice_write_coefs(PLCI *plci, word write_command)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word i;
-	byte *p;
-
-	word w, n, j, k;
-	byte ch_map[MIXER_CHANNELS_BRI];
-
-	byte coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE + 2];
-
-	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_write_coefs %d",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, write_command));
-
-	a = plci->adapter;
-	p = coef_buffer + 1;
-	*(p++) = DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS;
-	i = 0;
-	while (i + sizeof(word) <= a->adv_voice_coef_length)
-	{
-		PUT_WORD(p, GET_WORD(a->adv_voice_coef_buffer + i));
-		p += 2;
-		i += 2;
-	}
-	while (i < ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
-	{
-		PUT_WORD(p, 0x8000);
-		p += 2;
-		i += 2;
-	}
-
-	if (!a->li_pri && (plci->li_bchannel_id == 0))
-	{
-		if ((li_config_table[a->li_base].plci == NULL) && (li_config_table[a->li_base + 1].plci != NULL))
-		{
-			plci->li_bchannel_id = 1;
-			li_config_table[a->li_base].plci = plci;
-			dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
-					(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-					(char *)(FILE_), __LINE__, plci->li_bchannel_id));
-		}
-		else if ((li_config_table[a->li_base].plci != NULL) && (li_config_table[a->li_base + 1].plci == NULL))
-		{
-			plci->li_bchannel_id = 2;
-			li_config_table[a->li_base + 1].plci = plci;
-			dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
-					(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-					(char *)(FILE_), __LINE__, plci->li_bchannel_id));
-		}
-	}
-	if (!a->li_pri && (plci->li_bchannel_id != 0)
-	    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		i = a->li_base + (plci->li_bchannel_id - 1);
-		switch (write_command)
-		{
-		case ADV_VOICE_WRITE_ACTIVATION:
-			j = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
-			k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
-			if (!(plci->B1_facilities & B1_FACILITY_MIXER))
-			{
-				li_config_table[j].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
-				li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
-			}
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-			{
-				li_config_table[k].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
-				li_config_table[i].flag_table[k] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
-				li_config_table[k].flag_table[j] |= LI_FLAG_CONFERENCE;
-				li_config_table[j].flag_table[k] |= LI_FLAG_CONFERENCE;
-			}
-			mixer_calculate_coefs(a);
-			li_config_table[i].curchnl = li_config_table[i].channel;
-			li_config_table[j].curchnl = li_config_table[j].channel;
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-				li_config_table[k].curchnl = li_config_table[k].channel;
-			break;
-
-		case ADV_VOICE_WRITE_DEACTIVATION:
-			for (j = 0; j < li_total_channels; j++)
-			{
-				li_config_table[i].flag_table[j] = 0;
-				li_config_table[j].flag_table[i] = 0;
-			}
-			k = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
-			for (j = 0; j < li_total_channels; j++)
-			{
-				li_config_table[k].flag_table[j] = 0;
-				li_config_table[j].flag_table[k] = 0;
-			}
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-			{
-				k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
-				for (j = 0; j < li_total_channels; j++)
-				{
-					li_config_table[k].flag_table[j] = 0;
-					li_config_table[j].flag_table[k] = 0;
-				}
-			}
-			mixer_calculate_coefs(a);
-			break;
-		}
-		if (plci->B1_facilities & B1_FACILITY_MIXER)
-		{
-			w = 0;
-			if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)
-				w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
-			if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
-				w |= MIXER_FEATURE_ENABLE_TX_DATA;
-			if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
-				w |= MIXER_FEATURE_ENABLE_RX_DATA;
-			*(p++) = (byte) w;
-			*(p++) = (byte)(w >> 8);
-			for (j = 0; j < sizeof(ch_map); j += 2)
-			{
-				ch_map[j] = (byte)(j + (plci->li_bchannel_id - 1));
-				ch_map[j + 1] = (byte)(j + (2 - plci->li_bchannel_id));
-			}
-			for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
-			{
-				i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
-				j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
-				if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
-				{
-					*(p++) = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
-					w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
-					li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
-				}
-				else
-				{
-					*(p++) = (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n < a->adv_voice_coef_length) ?
-						a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n] : 0x00;
-				}
-			}
-		}
-		else
-		{
-			for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
-				*(p++) = a->adv_voice_coef_buffer[i];
-		}
-	}
-	else
-
-	{
-		for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
-			*(p++) = a->adv_voice_coef_buffer[i];
-	}
-	coef_buffer[0] = (p - coef_buffer) - 1;
-	add_p(plci, FTY, coef_buffer);
-	sig_req(plci, TEL_CTRL, 0);
-	send_req(plci);
-}
-
-
-static void adv_voice_clear_config(PLCI *plci)
-{
-	DIVA_CAPI_ADAPTER *a;
-
-	word i, j;
-
-
-	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_clear_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	a = plci->adapter;
-	if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
-	{
-		a->adv_voice_coef_length = 0;
-
-		if (!a->li_pri && (plci->li_bchannel_id != 0)
-		    && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-		{
-			i = a->li_base + (plci->li_bchannel_id - 1);
-			li_config_table[i].curchnl = 0;
-			li_config_table[i].channel = 0;
-			li_config_table[i].chflags = 0;
-			for (j = 0; j < li_total_channels; j++)
-			{
-				li_config_table[i].flag_table[j] = 0;
-				li_config_table[j].flag_table[i] = 0;
-				li_config_table[i].coef_table[j] = 0;
-				li_config_table[j].coef_table[i] = 0;
-			}
-			li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
-			i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
-			li_config_table[i].curchnl = 0;
-			li_config_table[i].channel = 0;
-			li_config_table[i].chflags = 0;
-			for (j = 0; j < li_total_channels; j++)
-			{
-				li_config_table[i].flag_table[j] = 0;
-				li_config_table[j].flag_table[i] = 0;
-				li_config_table[i].coef_table[j] = 0;
-				li_config_table[j].coef_table[i] = 0;
-			}
-			if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
-			{
-				i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
-				li_config_table[i].curchnl = 0;
-				li_config_table[i].channel = 0;
-				li_config_table[i].chflags = 0;
-				for (j = 0; j < li_total_channels; j++)
-				{
-					li_config_table[i].flag_table[j] = 0;
-					li_config_table[j].flag_table[i] = 0;
-					li_config_table[i].coef_table[j] = 0;
-					li_config_table[j].coef_table[i] = 0;
-				}
-			}
-		}
-
-	}
-}
-
-
-static void adv_voice_prepare_switch(dword Id, PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_prepare_switch",
-			UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word adv_voice_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_save_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	return (GOOD);
-}
-
-
-static word adv_voice_restore_config(dword Id, PLCI *plci, byte Rc)
-{
-	DIVA_CAPI_ADAPTER *a;
-	word Info;
-
-	dbug(1, dprintf("[%06lx] %s,%d: adv_voice_restore_config %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	a = plci->adapter;
-	if ((plci->B1_facilities & B1_FACILITY_VOICE)
-	    && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
-	{
-		switch (plci->adjust_b_state)
-		{
-		case ADJUST_B_RESTORE_VOICE_1:
-			plci->internal_command = plci->adjust_b_command;
-			if (plci->sig_req)
-			{
-				plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
-				break;
-			}
-			adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
-			plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_2;
-			break;
-		case ADJUST_B_RESTORE_VOICE_2:
-			if ((Rc != OK) && (Rc != OK_FC))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Restore voice config failed %02x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-				Info = _WRONG_STATE;
-				break;
-			}
-			break;
-		}
-	}
-	return (Info);
-}
-
-
-
-
-/*------------------------------------------------------------------*/
-/* B1 resource switching                                            */
-/*------------------------------------------------------------------*/
-
-static byte b1_facilities_table[] =
-{
-	0x00,  /* 0  No bchannel resources      */
-	0x00,  /* 1  Codec (automatic law)      */
-	0x00,  /* 2  Codec (A-law)              */
-	0x00,  /* 3  Codec (u-law)              */
-	0x00,  /* 4  HDLC for X.21              */
-	0x00,  /* 5  HDLC                       */
-	0x00,  /* 6  External Device 0          */
-	0x00,  /* 7  External Device 1          */
-	0x00,  /* 8  HDLC 56k                   */
-	0x00,  /* 9  Transparent                */
-	0x00,  /* 10 Loopback to network        */
-	0x00,  /* 11 Test pattern to net        */
-	0x00,  /* 12 Rate adaptation sync       */
-	0x00,  /* 13 Rate adaptation async      */
-	0x00,  /* 14 R-Interface                */
-	0x00,  /* 15 HDLC 128k leased line      */
-	0x00,  /* 16 FAX                        */
-	0x00,  /* 17 Modem async                */
-	0x00,  /* 18 Modem sync HDLC            */
-	0x00,  /* 19 V.110 async HDLC           */
-	0x12,  /* 20 Adv voice (Trans,mixer)    */
-	0x00,  /* 21 Codec connected to IC      */
-	0x0c,  /* 22 Trans,DTMF                 */
-	0x1e,  /* 23 Trans,DTMF+mixer           */
-	0x1f,  /* 24 Trans,DTMF+mixer+local     */
-	0x13,  /* 25 Trans,mixer+local          */
-	0x12,  /* 26 HDLC,mixer                 */
-	0x12,  /* 27 HDLC 56k,mixer             */
-	0x2c,  /* 28 Trans,LEC+DTMF             */
-	0x3e,  /* 29 Trans,LEC+DTMF+mixer       */
-	0x3f,  /* 30 Trans,LEC+DTMF+mixer+local */
-	0x2c,  /* 31 RTP,LEC+DTMF               */
-	0x3e,  /* 32 RTP,LEC+DTMF+mixer         */
-	0x3f,  /* 33 RTP,LEC+DTMF+mixer+local   */
-	0x00,  /* 34 Signaling task             */
-	0x00,  /* 35 PIAFS                      */
-	0x0c,  /* 36 Trans,DTMF+TONE            */
-	0x1e,  /* 37 Trans,DTMF+TONE+mixer      */
-	0x1f   /* 38 Trans,DTMF+TONE+mixer+local*/
-};
-
-
-static word get_b1_facilities(PLCI *plci, byte b1_resource)
-{
-	word b1_facilities;
-
-	b1_facilities = b1_facilities_table[b1_resource];
-	if ((b1_resource == 9) || (b1_resource == 20) || (b1_resource == 25))
-	{
-
-		if (!(((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
-		      || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
-
-		{
-			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND)
-				b1_facilities |= B1_FACILITY_DTMFX;
-			if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)
-				b1_facilities |= B1_FACILITY_DTMFR;
-		}
-	}
-	if ((b1_resource == 17) || (b1_resource == 18))
-	{
-		if (plci->adapter->manufacturer_features & (MANUFACTURER_FEATURE_V18 | MANUFACTURER_FEATURE_VOWN))
-			b1_facilities |= B1_FACILITY_DTMFX | B1_FACILITY_DTMFR;
-	}
-/*
-  dbug (1, dprintf("[%06lx] %s,%d: get_b1_facilities %d %04x",
-  (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-  (char far *)(FILE_), __LINE__, b1_resource, b1_facilities));
-*/
-	return (b1_facilities);
-}
-
-
-static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities)
-{
-	byte b;
-
-	switch (b1_resource)
-	{
-	case 5:
-	case 26:
-		if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-			b = 26;
-		else
-			b = 5;
-		break;
-
-	case 8:
-	case 27:
-		if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-			b = 27;
-		else
-			b = 8;
-		break;
-
-	case 9:
-	case 20:
-	case 22:
-	case 23:
-	case 24:
-	case 25:
-	case 28:
-	case 29:
-	case 30:
-	case 36:
-	case 37:
-	case 38:
-		if (b1_facilities & B1_FACILITY_EC)
-		{
-			if (b1_facilities & B1_FACILITY_LOCAL)
-				b = 30;
-			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-				b = 29;
-			else
-				b = 28;
-		}
-
-		else if ((b1_facilities & (B1_FACILITY_DTMFX | B1_FACILITY_DTMFR | B1_FACILITY_MIXER))
-			 && (((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
-			     || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
-		{
-			if (b1_facilities & B1_FACILITY_LOCAL)
-				b = 38;
-			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-				b = 37;
-			else
-				b = 36;
-		}
-
-		else if (((plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
-			  && !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
-			 || ((b1_facilities & B1_FACILITY_DTMFR)
-			     && ((b1_facilities & B1_FACILITY_MIXER)
-				 || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)))
-			 || ((b1_facilities & B1_FACILITY_DTMFX)
-			     && ((b1_facilities & B1_FACILITY_MIXER)
-				 || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND))))
-		{
-			if (b1_facilities & B1_FACILITY_LOCAL)
-				b = 24;
-			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-				b = 23;
-			else
-				b = 22;
-		}
-		else
-		{
-			if (b1_facilities & B1_FACILITY_LOCAL)
-				b = 25;
-			else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-				b = 20;
-			else
-				b = 9;
-		}
-		break;
-
-	case 31:
-	case 32:
-	case 33:
-		if (b1_facilities & B1_FACILITY_LOCAL)
-			b = 33;
-		else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
-			b = 32;
-		else
-			b = 31;
-		break;
-
-	default:
-		b = b1_resource;
-	}
-	dbug(1, dprintf("[%06lx] %s,%d: add_b1_facilities %d %04x %d %04x",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__,
-			b1_resource, b1_facilities, b, get_b1_facilities(plci, b)));
-	return (b);
-}
-
-
-static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities)
-{
-	word removed_facilities;
-
-	dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_facilities %d %04x %04x",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__, new_b1_resource, new_b1_facilities,
-			new_b1_facilities & get_b1_facilities(plci, new_b1_resource)));
-
-	new_b1_facilities &= get_b1_facilities(plci, new_b1_resource);
-	removed_facilities = plci->B1_facilities & ~new_b1_facilities;
-
-	if (removed_facilities & B1_FACILITY_EC)
-		ec_clear_config(plci);
-
-
-	if (removed_facilities & B1_FACILITY_DTMFR)
-	{
-		dtmf_rec_clear_config(plci);
-		dtmf_parameter_clear_config(plci);
-	}
-	if (removed_facilities & B1_FACILITY_DTMFX)
-		dtmf_send_clear_config(plci);
-
-
-	if (removed_facilities & B1_FACILITY_MIXER)
-		mixer_clear_config(plci);
-
-	if (removed_facilities & B1_FACILITY_VOICE)
-		adv_voice_clear_config(plci);
-	plci->B1_facilities = new_b1_facilities;
-}
-
-
-static void adjust_b_clear(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_clear",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->adjust_b_restore = false;
-}
-
-
-static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	byte b1_resource;
-	NCCI *ncci_ptr;
-	API_PARSE bp[2];
-
-	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_process %02x %d",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
-	Info = GOOD;
-	switch (plci->adjust_b_state)
-	{
-	case ADJUST_B_START:
-		if ((plci->adjust_b_parms_msg == NULL)
-		    && (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
-		    && ((plci->adjust_b_mode & ~(ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 |
-						 ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_RESTORE)) == 0))
-		{
-			b1_resource = (plci->adjust_b_mode == ADJUST_B_MODE_NO_RESOURCE) ?
-				0 : add_b1_facilities(plci, plci->B1_resource, plci->adjust_b_facilities);
-			if (b1_resource == plci->B1_resource)
-			{
-				adjust_b1_facilities(plci, b1_resource, plci->adjust_b_facilities);
-				break;
-			}
-			if (plci->adjust_b_facilities & ~get_b1_facilities(plci, b1_resource))
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Adjust B nonsupported facilities %d %d %04x",
-						UnMapId(Id), (char *)(FILE_), __LINE__,
-						plci->B1_resource, b1_resource, plci->adjust_b_facilities));
-				Info = _WRONG_STATE;
-				break;
-			}
-		}
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-
-			mixer_prepare_switch(Id, plci);
-
-
-			dtmf_prepare_switch(Id, plci);
-			dtmf_parameter_prepare_switch(Id, plci);
-
-
-			ec_prepare_switch(Id, plci);
-
-			adv_voice_prepare_switch(Id, plci);
-		}
-		plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SAVE_MIXER_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-
-			Info = mixer_save_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SAVE_DTMF_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-
-			Info = dtmf_save_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
-		/* fall through */
-	case ADJUST_B_REMOVE_L23_1:
-		if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
-		    && plci->NL.Id && !plci->nl_remove_id)
-		{
-			plci->internal_command = plci->adjust_b_command;
-			if (plci->adjust_b_ncci != 0)
-			{
-				ncci_ptr = &(plci->adapter->ncci[plci->adjust_b_ncci]);
-				while (ncci_ptr->data_pending)
-				{
-					plci->data_sent_ptr = ncci_ptr->DBuffer[ncci_ptr->data_out].P;
-					data_rc(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
-				}
-				while (ncci_ptr->data_ack_pending)
-					data_ack(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
-			}
-			nl_req_ncci(plci, REMOVE,
-				    (byte)((plci->adjust_b_mode & ADJUST_B_MODE_CONNECT) ? plci->adjust_b_ncci : 0));
-			send_req(plci);
-			plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_REMOVE_L23_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B remove failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			Info = _WRONG_STATE;
-			break;
-		}
-		if (plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
-		{
-			if (plci_nl_busy(plci))
-			{
-				plci->internal_command = plci->adjust_b_command;
-				break;
-			}
-		}
-		plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SAVE_EC_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-
-			Info = ec_save_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SAVE_DTMF_PARAMETER_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-
-			Info = dtmf_parameter_save_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SAVE_VOICE_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
-		{
-			Info = adv_voice_save_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-		}
-		plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
-		/* fall through */
-	case ADJUST_B_SWITCH_L1_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
-		{
-			if (plci->sig_req)
-			{
-				plci->internal_command = plci->adjust_b_command;
-				break;
-			}
-			if (plci->adjust_b_parms_msg != NULL)
-				api_load_msg(plci->adjust_b_parms_msg, bp);
-			else
-				api_load_msg(&plci->B_protocol, bp);
-			Info = add_b1(plci, bp,
-				      (word)((plci->adjust_b_mode & ADJUST_B_MODE_NO_RESOURCE) ? 2 : 0),
-				      plci->adjust_b_facilities);
-			if (Info != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L1 parameters %d %04x",
-						UnMapId(Id), (char *)(FILE_), __LINE__,
-						plci->B1_resource, plci->adjust_b_facilities));
-				break;
-			}
-			plci->internal_command = plci->adjust_b_command;
-			sig_req(plci, RESOURCES, 0);
-			send_req(plci);
-			plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_SWITCH_L1_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B switch failed %02x %d %04x",
-					UnMapId(Id), (char *)(FILE_), __LINE__,
-					Rc, plci->B1_resource, plci->adjust_b_facilities));
-			Info = _WRONG_STATE;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_VOICE_1:
-	case ADJUST_B_RESTORE_VOICE_2:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
-		{
-			Info = adv_voice_restore_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-		}
-		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
-	case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
-		{
-
-			Info = dtmf_parameter_restore_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_EC_1:
-	case ADJUST_B_RESTORE_EC_2:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
-		{
-
-			Info = ec_restore_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
-		/* fall through */
-	case ADJUST_B_ASSIGN_L23_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
-		{
-			if (plci_nl_busy(plci))
-			{
-				plci->internal_command = plci->adjust_b_command;
-				break;
-			}
-			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
-				plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
-			if (plci->adjust_b_parms_msg != NULL)
-				api_load_msg(plci->adjust_b_parms_msg, bp);
-			else
-				api_load_msg(&plci->B_protocol, bp);
-			Info = add_b23(plci, bp);
-			if (Info != GOOD)
-			{
-				dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L23 parameters %04x",
-						UnMapId(Id), (char *)(FILE_), __LINE__, Info));
-				break;
-			}
-			plci->internal_command = plci->adjust_b_command;
-			nl_req_ncci(plci, ASSIGN, 0);
-			send_req(plci);
-			plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
-		Rc = ASSIGN_OK;
-		/* fall through */
-	case ADJUST_B_ASSIGN_L23_2:
-		if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B assign failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			Info = _WRONG_STATE;
-			break;
-		}
-		if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
-		{
-			if (Rc != ASSIGN_OK)
-			{
-				plci->internal_command = plci->adjust_b_command;
-				break;
-			}
-		}
-		if (plci->adjust_b_mode & ADJUST_B_MODE_USER_CONNECT)
-		{
-			plci->adjust_b_restore = true;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_CONNECT_1;
-		/* fall through */
-	case ADJUST_B_CONNECT_1:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
-		{
-			plci->internal_command = plci->adjust_b_command;
-			if (plci_nl_busy(plci))
-				break;
-			nl_req_ncci(plci, N_CONNECT, 0);
-			send_req(plci);
-			plci->adjust_b_state = ADJUST_B_CONNECT_2;
-			break;
-		}
-		plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_CONNECT_2:
-	case ADJUST_B_CONNECT_3:
-	case ADJUST_B_CONNECT_4:
-		if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B connect failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			Info = _WRONG_STATE;
-			break;
-		}
-		if (Rc == OK)
-		{
-			if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
-			{
-				get_ncci(plci, (byte)(Id >> 16), plci->adjust_b_ncci);
-				Id = (Id & 0xffff) | (((dword)(plci->adjust_b_ncci)) << 16);
-			}
-			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
-				plci->adjust_b_state = ADJUST_B_CONNECT_3;
-			else if (plci->adjust_b_state == ADJUST_B_CONNECT_4)
-				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
-		}
-		else if (Rc == 0)
-		{
-			if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
-				plci->adjust_b_state = ADJUST_B_CONNECT_4;
-			else if (plci->adjust_b_state == ADJUST_B_CONNECT_3)
-				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
-		}
-		if (plci->adjust_b_state != ADJUST_B_RESTORE_DTMF_1)
-		{
-			plci->internal_command = plci->adjust_b_command;
-			break;
-		}
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_DTMF_1:
-	case ADJUST_B_RESTORE_DTMF_2:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
-		{
-
-			Info = dtmf_restore_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_MIXER_1:
-	case ADJUST_B_RESTORE_MIXER_2:
-	case ADJUST_B_RESTORE_MIXER_3:
-	case ADJUST_B_RESTORE_MIXER_4:
-	case ADJUST_B_RESTORE_MIXER_5:
-	case ADJUST_B_RESTORE_MIXER_6:
-	case ADJUST_B_RESTORE_MIXER_7:
-		if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
-		{
-
-			Info = mixer_restore_config(Id, plci, Rc);
-			if ((Info != GOOD) || plci->internal_command)
-				break;
-
-		}
-		plci->adjust_b_state = ADJUST_B_END;
-	case ADJUST_B_END:
-		break;
-	}
-	return (Info);
-}
-
-
-static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE   *bp_msg, word b1_facilities, word internal_command)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_resource %d %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__,
-			plci->B1_resource, b1_facilities));
-
-	plci->adjust_b_parms_msg = bp_msg;
-	plci->adjust_b_facilities = b1_facilities;
-	plci->adjust_b_command = internal_command;
-	plci->adjust_b_ncci = (word)(Id >> 16);
-	if ((bp_msg == NULL) && (plci->B1_resource == 0))
-		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_SWITCH_L1;
-	else
-		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_RESTORE;
-	plci->adjust_b_state = ADJUST_B_START;
-	dbug(1, dprintf("[%06lx] %s,%d: Adjust B1 resource %d %04x...",
-			UnMapId(Id), (char *)(FILE_), __LINE__,
-			plci->B1_resource, b1_facilities));
-}
-
-
-static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: adjust_b_restore %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		if (plci->req_in != 0)
-		{
-			plci->internal_command = ADJUST_B_RESTORE_1;
-			break;
-		}
-		Rc = OK;
-		/* fall through */
-	case ADJUST_B_RESTORE_1:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B enqueued failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-		}
-		plci->adjust_b_parms_msg = NULL;
-		plci->adjust_b_facilities = plci->B1_facilities;
-		plci->adjust_b_command = ADJUST_B_RESTORE_2;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		plci->adjust_b_mode = ADJUST_B_MODE_RESTORE;
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case ADJUST_B_RESTORE_2:
-		if (adjust_b_process(Id, plci, Rc) != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-		}
-		if (plci->internal_command)
-			break;
-		break;
-	}
-}
-
-
-static void reset_b3_command(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: reset_b3_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		plci->adjust_b_parms_msg = NULL;
-		plci->adjust_b_facilities = plci->B1_facilities;
-		plci->adjust_b_command = RESET_B3_COMMAND_1;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_CONNECT;
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: Reset B3...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case RESET_B3_COMMAND_1:
-		Info = adjust_b_process(Id, plci, Rc);
-		if (Info != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Reset failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			break;
-		}
-		if (plci->internal_command)
-			return;
-		break;
-	}
-/*  sendf (plci->appl, _RESET_B3_R | CONFIRM, Id, plci->number, "w", Info);*/
-	sendf(plci->appl, _RESET_B3_I, Id, 0, "s", "");
-}
-
-
-static void select_b_command(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	word internal_command;
-	byte esc_chi[3];
-
-	dbug(1, dprintf("[%06lx] %s,%d: select_b_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		plci->adjust_b_parms_msg = &plci->saved_msg;
-		if ((plci->tel == ADV_VOICE) && (plci == plci->adapter->AdvSignalPLCI))
-			plci->adjust_b_facilities = plci->B1_facilities | B1_FACILITY_VOICE;
-		else
-			plci->adjust_b_facilities = plci->B1_facilities & ~B1_FACILITY_VOICE;
-		plci->adjust_b_command = SELECT_B_COMMAND_1;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		if (plci->saved_msg.parms[0].length == 0)
-		{
-			plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
-				ADJUST_B_MODE_NO_RESOURCE;
-		}
-		else
-		{
-			plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
-				ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
-		}
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: Select B protocol...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case SELECT_B_COMMAND_1:
-		Info = adjust_b_process(Id, plci, Rc);
-		if (Info != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: Select B protocol failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			break;
-		}
-		if (plci->internal_command)
-			return;
-		if (plci->tel == ADV_VOICE)
-		{
-			esc_chi[0] = 0x02;
-			esc_chi[1] = 0x18;
-			esc_chi[2] = plci->b_channel;
-			SetVoiceChannel(plci->adapter->AdvCodecPLCI, esc_chi, plci->adapter);
-		}
-		break;
-	}
-	sendf(plci->appl, _SELECT_B_REQ | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0; /* fall through */
-	case FAX_CONNECT_ACK_COMMAND_1:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = FAX_CONNECT_ACK_COMMAND_1;
-			return;
-		}
-		plci->internal_command = FAX_CONNECT_ACK_COMMAND_2;
-		plci->NData[0].P = plci->fax_connect_info_buffer;
-		plci->NData[0].PLength = plci->fax_connect_info_length;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_CONNECT_ACK;
-		plci->adapter->request(&plci->NL);
-		return;
-	case FAX_CONNECT_ACK_COMMAND_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: FAX issue CONNECT ACK failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			break;
-		}
-	}
-	if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
-	    && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
-	{
-		if (plci->B3_prot == 4)
-			sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-		else
-			sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
-		plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
-	}
-}
-
-
-static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		/* fall through */
-	case FAX_EDATA_ACK_COMMAND_1:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = FAX_EDATA_ACK_COMMAND_1;
-			return;
-		}
-		plci->internal_command = FAX_EDATA_ACK_COMMAND_2;
-		plci->NData[0].P = plci->fax_connect_info_buffer;
-		plci->NData[0].PLength = plci->fax_edata_ack_length;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_EDATA;
-		plci->adapter->request(&plci->NL);
-		return;
-	case FAX_EDATA_ACK_COMMAND_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: FAX issue EDATA ACK failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			break;
-		}
-	}
-}
-
-
-static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: fax_connect_info_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0; /* fall through */
-	case FAX_CONNECT_INFO_COMMAND_1:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = FAX_CONNECT_INFO_COMMAND_1;
-			return;
-		}
-		plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
-		plci->NData[0].P = plci->fax_connect_info_buffer;
-		plci->NData[0].PLength = plci->fax_connect_info_length;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_EDATA;
-		plci->adapter->request(&plci->NL);
-		return;
-	case FAX_CONNECT_INFO_COMMAND_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: FAX setting connect info failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			Info = _WRONG_STATE;
-			break;
-		}
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
-			return;
-		}
-		plci->command = _CONNECT_B3_R;
-		nl_req_ncci(plci, N_CONNECT, 0);
-		send_req(plci);
-		return;
-	}
-	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: fax_adjust_b23_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		plci->adjust_b_parms_msg = NULL;
-		plci->adjust_b_facilities = plci->B1_facilities;
-		plci->adjust_b_command = FAX_ADJUST_B23_COMMAND_1;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23;
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: FAX adjust B23...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case FAX_ADJUST_B23_COMMAND_1:
-		Info = adjust_b_process(Id, plci, Rc);
-		if (Info != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: FAX adjust failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			break;
-		}
-		if (plci->internal_command)
-			return;
-		/* fall through */
-	case FAX_ADJUST_B23_COMMAND_2:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = FAX_ADJUST_B23_COMMAND_2;
-			return;
-		}
-		plci->command = _CONNECT_B3_R;
-		nl_req_ncci(plci, N_CONNECT, 0);
-		send_req(plci);
-		return;
-	}
-	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: fax_disconnect_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		plci->internal_command = FAX_DISCONNECT_COMMAND_1;
-		return;
-	case FAX_DISCONNECT_COMMAND_1:
-	case FAX_DISCONNECT_COMMAND_2:
-	case FAX_DISCONNECT_COMMAND_3:
-		if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: FAX disconnect EDATA failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			break;
-		}
-		if (Rc == OK)
-		{
-			if ((internal_command == FAX_DISCONNECT_COMMAND_1)
-			    || (internal_command == FAX_DISCONNECT_COMMAND_2))
-			{
-				plci->internal_command = FAX_DISCONNECT_COMMAND_2;
-			}
-		}
-		else if (Rc == 0)
-		{
-			if (internal_command == FAX_DISCONNECT_COMMAND_1)
-				plci->internal_command = FAX_DISCONNECT_COMMAND_3;
-		}
-		return;
-	}
-}
-
-
-
-static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc)
-{
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_req_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0; /* fall through */
-	case RTP_CONNECT_B3_REQ_COMMAND_1:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_1;
-			return;
-		}
-		plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
-		nl_req_ncci(plci, N_CONNECT, 0);
-		send_req(plci);
-		return;
-	case RTP_CONNECT_B3_REQ_COMMAND_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect info failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			Info = _WRONG_STATE;
-			break;
-		}
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
-			return;
-		}
-		plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_3;
-		plci->NData[0].PLength = plci->internal_req_buffer[0];
-		plci->NData[0].P = plci->internal_req_buffer + 1;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-		plci->adapter->request(&plci->NL);
-		break;
-	case RTP_CONNECT_B3_REQ_COMMAND_3:
-		return;
-	}
-	sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc)
-{
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0; /* fall through */
-	case RTP_CONNECT_B3_RES_COMMAND_1:
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_1;
-			return;
-		}
-		plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
-		nl_req_ncci(plci, N_CONNECT_ACK, (byte)(Id >> 16));
-		send_req(plci);
-		return;
-	case RTP_CONNECT_B3_RES_COMMAND_2:
-		if ((Rc != OK) && (Rc != OK_FC))
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
-					UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
-			break;
-		}
-		if (plci_nl_busy(plci))
-		{
-			plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
-			return;
-		}
-		sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
-		plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_3;
-		plci->NData[0].PLength = plci->internal_req_buffer[0];
-		plci->NData[0].P = plci->internal_req_buffer + 1;
-		plci->NL.X = plci->NData;
-		plci->NL.ReqCh = 0;
-		plci->NL.Req = plci->nl_req = (byte) N_UDATA;
-		plci->adapter->request(&plci->NL);
-		return;
-	case RTP_CONNECT_B3_RES_COMMAND_3:
-		return;
-	}
-}
-
-
-
-static void hold_save_command(dword Id, PLCI *plci, byte Rc)
-{
-	byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: hold_save_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		if (!plci->NL.Id)
-			break;
-		plci->command = 0;
-		plci->adjust_b_parms_msg = NULL;
-		plci->adjust_b_facilities = plci->B1_facilities;
-		plci->adjust_b_command = HOLD_SAVE_COMMAND_1;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23;
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: HOLD save...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case HOLD_SAVE_COMMAND_1:
-		Info = adjust_b_process(Id, plci, Rc);
-		if (Info != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: HOLD save failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			break;
-		}
-		if (plci->internal_command)
-			return;
-	}
-	sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
-}
-
-
-static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc)
-{
-	byte SS_Ind[] = "\x05\x03\x00\x02\x00\x00"; /* Retrieve_Ind struct*/
-	word Info;
-	word internal_command;
-
-	dbug(1, dprintf("[%06lx] %s,%d: retrieve_restore_command %02x %04x",
-			UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
-	Info = GOOD;
-	internal_command = plci->internal_command;
-	plci->internal_command = 0;
-	switch (internal_command)
-	{
-	default:
-		plci->command = 0;
-		plci->adjust_b_parms_msg = NULL;
-		plci->adjust_b_facilities = plci->B1_facilities;
-		plci->adjust_b_command = RETRIEVE_RESTORE_COMMAND_1;
-		plci->adjust_b_ncci = (word)(Id >> 16);
-		plci->adjust_b_mode = ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
-		plci->adjust_b_state = ADJUST_B_START;
-		dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore...",
-				UnMapId(Id), (char *)(FILE_), __LINE__));
-		/* fall through */
-	case RETRIEVE_RESTORE_COMMAND_1:
-		Info = adjust_b_process(Id, plci, Rc);
-		if (Info != GOOD)
-		{
-			dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore failed",
-					UnMapId(Id), (char *)(FILE_), __LINE__));
-			break;
-		}
-		if (plci->internal_command)
-			return;
-	}
-	sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
-}
-
-
-static void init_b1_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: init_b1_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	plci->B1_resource = 0;
-	plci->B1_facilities = 0;
-
-	plci->li_bchannel_id = 0;
-	mixer_clear_config(plci);
-
-
-	ec_clear_config(plci);
-
-
-	dtmf_rec_clear_config(plci);
-	dtmf_send_clear_config(plci);
-	dtmf_parameter_clear_config(plci);
-
-	adv_voice_clear_config(plci);
-	adjust_b_clear(plci);
-}
-
-
-static void clear_b1_config(PLCI *plci)
-{
-
-	dbug(1, dprintf("[%06lx] %s,%d: clear_b1_config",
-			(dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
-			(char *)(FILE_), __LINE__));
-
-	adv_voice_clear_config(plci);
-	adjust_b_clear(plci);
-
-	ec_clear_config(plci);
-
-
-	dtmf_rec_clear_config(plci);
-	dtmf_send_clear_config(plci);
-	dtmf_parameter_clear_config(plci);
-
-
-	if ((plci->li_bchannel_id != 0)
-	    && (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci == plci))
-	{
-		mixer_clear_config(plci);
-		li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = NULL;
-		plci->li_bchannel_id = 0;
-	}
-
-	plci->B1_resource = 0;
-	plci->B1_facilities = 0;
-}
-
-
-/* -----------------------------------------------------------------
-   XON protocol local helpers
-   ----------------------------------------------------------------- */
-static void channel_flow_control_remove(PLCI *plci) {
-	DIVA_CAPI_ADAPTER *a = plci->adapter;
-	word i;
-	for (i = 1; i < MAX_NL_CHANNEL + 1; i++) {
-		if (a->ch_flow_plci[i] == plci->Id) {
-			a->ch_flow_plci[i] = 0;
-			a->ch_flow_control[i] = 0;
-		}
-	}
-}
-
-static void channel_x_on(PLCI *plci, byte ch) {
-	DIVA_CAPI_ADAPTER *a = plci->adapter;
-	if (a->ch_flow_control[ch] & N_XON_SENT) {
-		a->ch_flow_control[ch] &= ~N_XON_SENT;
-	}
-}
-
-static void channel_x_off(PLCI *plci, byte ch, byte flag) {
-	DIVA_CAPI_ADAPTER *a = plci->adapter;
-	if ((a->ch_flow_control[ch] & N_RX_FLOW_CONTROL_MASK) == 0) {
-		a->ch_flow_control[ch] |= (N_CH_XOFF | flag);
-		a->ch_flow_plci[ch] = plci->Id;
-		a->ch_flow_control_pending++;
-	}
-}
-
-static void channel_request_xon(PLCI *plci, byte ch) {
-	DIVA_CAPI_ADAPTER *a = plci->adapter;
-
-	if (a->ch_flow_control[ch] & N_CH_XOFF) {
-		a->ch_flow_control[ch] |= N_XON_REQ;
-		a->ch_flow_control[ch] &= ~N_CH_XOFF;
-		a->ch_flow_control[ch] &= ~N_XON_CONNECT_IND;
-	}
-}
-
-static void channel_xmit_extended_xon(PLCI *plci) {
-	DIVA_CAPI_ADAPTER *a;
-	int max_ch = ARRAY_SIZE(a->ch_flow_control);
-	int i, one_requested = 0;
-
-	if ((!plci) || (!plci->Id) || ((a = plci->adapter) == NULL)) {
-		return;
-	}
-
-	for (i = 0; i < max_ch; i++) {
-		if ((a->ch_flow_control[i] & N_CH_XOFF) &&
-		    (a->ch_flow_control[i] & N_XON_CONNECT_IND) &&
-		    (plci->Id == a->ch_flow_plci[i])) {
-			channel_request_xon(plci, (byte)i);
-			one_requested = 1;
-		}
-	}
-
-	if (one_requested) {
-		channel_xmit_xon(plci);
-	}
-}
-
-/*
-  Try to xmit next X_ON
-*/
-static int find_channel_with_pending_x_on(DIVA_CAPI_ADAPTER *a, PLCI *plci) {
-	int max_ch = ARRAY_SIZE(a->ch_flow_control);
-	int i;
-
-	if (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)) {
-		return (0);
-	}
-
-	if (a->last_flow_control_ch >= max_ch) {
-		a->last_flow_control_ch = 1;
-	}
-	for (i = a->last_flow_control_ch; i < max_ch; i++) {
-		if ((a->ch_flow_control[i] & N_XON_REQ) &&
-		    (plci->Id == a->ch_flow_plci[i])) {
-			a->last_flow_control_ch = i + 1;
-			return (i);
-		}
-	}
-
-	for (i = 1; i < a->last_flow_control_ch; i++) {
-		if ((a->ch_flow_control[i] & N_XON_REQ) &&
-		    (plci->Id == a->ch_flow_plci[i])) {
-			a->last_flow_control_ch = i + 1;
-			return (i);
-		}
-	}
-
-	return (0);
-}
-
-static void channel_xmit_xon(PLCI *plci) {
-	DIVA_CAPI_ADAPTER *a = plci->adapter;
-	byte ch;
-
-	if (plci->nl_req || !plci->NL.Id || plci->nl_remove_id) {
-		return;
-	}
-	if ((ch = (byte)find_channel_with_pending_x_on(a, plci)) == 0) {
-		return;
-	}
-	a->ch_flow_control[ch] &= ~N_XON_REQ;
-	a->ch_flow_control[ch] |= N_XON_SENT;
-
-	plci->NL.Req = plci->nl_req = (byte)N_XON;
-	plci->NL.ReqCh         = ch;
-	plci->NL.X             = plci->NData;
-	plci->NL.XNum          = 1;
-	plci->NData[0].P       = &plci->RBuffer[0];
-	plci->NData[0].PLength = 0;
-
-	plci->adapter->request(&plci->NL);
-}
-
-static int channel_can_xon(PLCI *plci, byte ch) {
-	APPL *APPLptr;
-	DIVA_CAPI_ADAPTER *a;
-	word NCCIcode;
-	dword count;
-	word Num;
-	word i;
-
-	APPLptr = plci->appl;
-	a = plci->adapter;
-
-	if (!APPLptr)
-		return (0);
-
-	NCCIcode = a->ch_ncci[ch] | (((word) a->Id) << 8);
-
-	/* count all buffers within the Application pool    */
-	/* belonging to the same NCCI. Allow XON only while */
-	/* few are in use and a free buffer is available.   */
-	count = 0;
-	Num = 0xffff;
-	for (i = 0; i < APPLptr->MaxBuffer; i++) {
-		if (NCCIcode == APPLptr->DataNCCI[i]) count++;
-		if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
-	}
-	if ((count > 2) || (Num == 0xffff)) {
-		return (0);
-	}
-	return (1);
-}
-
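The XON/XOFF helpers above keep per-channel flow-control state in a few bit flags (N_CH_XOFF, N_XON_REQ, N_XON_SENT and friends). A minimal standalone sketch of the same bookkeeping, using hypothetical flag values rather than the driver's real N_* constants, could look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values; the driver's real N_* constants live in its headers. */
#define CH_XOFF      0x01  /* receiver asked the peer to stop sending   */
#define CH_XON_REQ   0x02  /* an X_ON still has to be transmitted       */
#define CH_XON_SENT  0x04  /* X_ON request has been handed to the card  */

static void ch_x_off(uint8_t *state)
{
	if (!(*state & (CH_XOFF | CH_XON_REQ | CH_XON_SENT)))
		*state |= CH_XOFF;              /* enter flow-controlled state */
}

static void ch_request_xon(uint8_t *state)
{
	if (*state & CH_XOFF) {
		*state &= ~CH_XOFF;             /* leave XOFF ...              */
		*state |= CH_XON_REQ;           /* ... and schedule an X_ON    */
	}
}

static void ch_xmit_xon(uint8_t *state)
{
	if (*state & CH_XON_REQ) {
		*state &= ~CH_XON_REQ;
		*state |= CH_XON_SENT;          /* X_ON is now on the wire     */
	}
}

int main(void)
{
	uint8_t state = 0;

	ch_x_off(&state);
	ch_request_xon(&state);
	ch_xmit_xon(&state);
	printf("final state: 0x%02x\n", (unsigned)state); /* prints 0x04 */
	return 0;
}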
-
-/*------------------------------------------------------------------*/
-
-static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *a, word offset)
-{
-	return 1;
-}
-
-
-
-/**********************************************************************************/
-/* function groups the listening applications according to the CIP mask and the   */
-/* Info_Mask. Each group gets just one Connect_Ind. Some application manufacturers*/
-/* are not multi-instance capable, so they start e.g. 30 applications, which      */
-/* causes big problems on application level (one call, 30 Connect_Ind, etc.). The */
-/* function must be enabled by setting "a->group_optimization_enabled" from the   */
-/* OS specific part (per adapter).                                                */
-/**********************************************************************************/
-static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
-{
-	word i, j, k, busy, group_found;
-	dword info_mask_group[MAX_CIP_TYPES];
-	dword cip_mask_group[MAX_CIP_TYPES];
-	word appl_number_group_type[MAX_APPL];
-	PLCI *auxplci;
-
-	/* all APPLs within this inc. call are allowed to dial in */
-	bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
-
-	if (!a->group_optimization_enabled)
-	{
-		dbug(1, dprintf("No group optimization"));
-		return;
-	}
-
-	dbug(1, dprintf("Group optimization = 0x%x...", a->group_optimization_enabled));
-
-	for (i = 0; i < MAX_CIP_TYPES; i++)
-	{
-		info_mask_group[i] = 0;
-		cip_mask_group[i] = 0;
-	}
-	for (i = 0; i < MAX_APPL; i++)
-	{
-		appl_number_group_type[i] = 0;
-	}
-	for (i = 0; i < max_appl; i++) /* check if any multi instance capable application is present */
-	{  /* group_optimization set to 1 means not to optimize multi-instance capable applications (default) */
-		if (application[i].Id && (application[i].MaxNCCI) > 1 && (a->CIP_Mask[i]) && (a->group_optimization_enabled == 1))
-		{
-			dbug(1, dprintf("Multi-Instance capable, no optimization required"));
-			return; /* allow good application unfiltered access */
-		}
-	}
-	for (i = 0; i < max_appl; i++) /* Build CIP Groups */
-	{
-		if (application[i].Id && a->CIP_Mask[i])
-		{
-			for (k = 0, busy = false; k < a->max_plci; k++)
-			{
-				if (a->plci[k].Id)
-				{
-					auxplci = &a->plci[k];
-					if (auxplci->appl == &application[i]) {
-						/* application has a busy PLCI */
-						busy = true;
-						dbug(1, dprintf("Appl 0x%x is busy", i + 1));
-					} else if (test_bit(i, plci->c_ind_mask_table)) {
-						/* application has an incoming call pending */
-						busy = true;
-						dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1));
-					}
-				}
-			}
-
-			for (j = 0, group_found = 0; j <= (MAX_CIP_TYPES) && !busy && !group_found; j++)     /* build groups with free applications only */
-			{
-				if (j == MAX_CIP_TYPES)       /* all groups are in use but group still not found */
-				{                           /* the MAX_CIP_TYPES group enables all calls because of field overflow */
-					appl_number_group_type[i] = MAX_CIP_TYPES;
-					group_found = true;
-					dbug(1, dprintf("Field overflow appl 0x%x", i + 1));
-				}
-				else if ((info_mask_group[j] == a->CIP_Mask[i]) && (cip_mask_group[j] == a->Info_Mask[i]))
-				{                                      /* is group already present ?                  */
-					appl_number_group_type[i] = j | 0x80;  /* store the group number for each application */
-					group_found = true;
-					dbug(1, dprintf("Group 0x%x found with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
-				}
-				else if (!info_mask_group[j])
-				{                                      /* establish a new group                       */
-					appl_number_group_type[i] = j | 0x80;  /* store the group number for each application */
-					info_mask_group[j] = a->CIP_Mask[i]; /* store the new CIP mask for the new group    */
-					cip_mask_group[j] = a->Info_Mask[i]; /* store the new Info_Mask for this new group  */
-					group_found = true;
-					dbug(1, dprintf("New Group 0x%x established with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
-				}
-			}
-		}
-	}
-
-	for (i = 0; i < max_appl; i++) /* Build group_optimization_mask_table */
-	{
-		if (appl_number_group_type[i]) /* application is free, has listens and is member of a group */
-		{
-			if (appl_number_group_type[i] == MAX_CIP_TYPES)
-			{
-				dbug(1, dprintf("OverflowGroup 0x%x, valid appl = 0x%x, call enabled", appl_number_group_type[i], i + 1));
-			}
-			else
-			{
-				dbug(1, dprintf("Group 0x%x, valid appl = 0x%x", appl_number_group_type[i], i + 1));
-				for (j = i + 1; j < max_appl; j++)   /* search other group members and mark them as busy        */
-				{
-					if (appl_number_group_type[i] == appl_number_group_type[j])
-					{
-						dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j]));
-						/* disable call on other group members */
-						__clear_bit(j, plci->group_optimization_mask_table);
-						appl_number_group_type[j] = 0;       /* remove disabled group member from group list */
-					}
-				}
-			}
-		}
-		else                                                 /* application should not get a call */
-		{
-			__clear_bit(i, plci->group_optimization_mask_table);
-		}
-	}
-
-}
-
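group_optimization() above essentially partitions the listening applications into groups keyed by the (CIP mask, Info_Mask) pair and lets only one member per group receive the Connect_Ind. A simplified, self-contained sketch of that grouping idea (ignoring the busy-PLCI handling and using its own data structures, not the driver's) is:

#include <stdint.h>
#include <stdio.h>

#define MAX_APPL   8
#define MAX_GROUPS 4

struct appl { uint32_t cip_mask; uint32_t info_mask; int allowed; };

/* Keep only the first member of each (cip_mask, info_mask) group enabled. */
static void group_optimize(struct appl *appl, int n)
{
	uint32_t g_cip[MAX_GROUPS], g_info[MAX_GROUPS];
	int groups = 0, i, j, found;

	for (i = 0; i < n; i++) {
		appl[i].allowed = 1;
		if (!appl[i].cip_mask)
			continue;                       /* no listen: nothing to group */
		for (j = 0, found = 0; j < groups; j++) {
			if (g_cip[j] == appl[i].cip_mask &&
			    g_info[j] == appl[i].info_mask) {
				appl[i].allowed = 0;    /* later member of an existing group */
				found = 1;
				break;
			}
		}
		if (!found && groups < MAX_GROUPS) {    /* open a new group */
			g_cip[groups] = appl[i].cip_mask;
			g_info[groups] = appl[i].info_mask;
			groups++;
		}
	}
}

int main(void)
{
	struct appl a[3] = { { 0x3, 0x1, 0 }, { 0x3, 0x1, 0 }, { 0x7, 0x1, 0 } };

	group_optimize(a, 3);
	printf("%d %d %d\n", a[0].allowed, a[1].allowed, a[2].allowed); /* 1 0 1 */
	return 0;
}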
-
-
-/* OS notifies the driver about an application Capi_Register */
-word CapiRegister(word id)
-{
-	word i, j, appls_found;
-
-	PLCI *plci;
-	DIVA_CAPI_ADAPTER *a;
-
-	for (i = 0, appls_found = 0; i < max_appl; i++)
-	{
-		if (application[i].Id && (application[i].Id != id))
-		{
-			appls_found++;                       /* an application has been found */
-		}
-	}
-
-	if (appls_found) return true;
-	for (i = 0; i < max_adapter; i++)                   /* scan all adapters...    */
-	{
-		a = &adapter[i];
-		if (a->request)
-		{
-			if (a->flag_dynamic_l1_down)  /* remove adapter from L1 tristate (Huntgroup) */
-			{
-				if (!appls_found)           /* first application does a capi register   */
-				{
-					if ((j = get_plci(a)))                    /* activate L1 of all adapters */
-					{
-						plci = &a->plci[j - 1];
-						plci->command = 0;
-						add_p(plci, OAD, "\x01\xfd");
-						add_p(plci, CAI, "\x01\x80");
-						add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
-						add_p(plci, SHIFT | 6, NULL);
-						add_p(plci, SIN, "\x02\x00\x00");
-						plci->internal_command = START_L1_SIG_ASSIGN_PEND;
-						sig_req(plci, ASSIGN, DSIG_ID);
-						add_p(plci, FTY, "\x02\xff\x07"); /* l1 start */
-						sig_req(plci, SIG_CTRL, 0);
-						send_req(plci);
-					}
-				}
-			}
-		}
-	}
-	return false;
-}
-
-/*------------------------------------------------------------------*/
-
-/* Functions for virtual switching, e.g. transfer by join, conference */
-
-static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms)
-{
-	word i;
-	/* Format of vswitch_t:
-	   0 byte length
-	   1 byte VSWITCHIE
-	   2 byte VSWITCH_REQ/VSWITCH_IND
-	   3 byte reserved
-	   4 word VSwitchcommand
-	   6 word returnerror
-	   8... Params
-	*/
-	if (!plci ||
-	    !plci->appl ||
-	    !plci->State ||
-	    plci->Sig.Ind == NCR_FACILITY
-		)
-		return;
-
-	for (i = 0; i < MAX_MULTI_IE; i++)
-	{
-		if (!parms[i][0]) continue;
-		if (parms[i][0] < 7)
-		{
-			parms[i][0] = 0; /* kill it */
-			continue;
-		}
-		dbug(1, dprintf("VSwitchReqInd(%d)", parms[i][4]));
-		switch (parms[i][4])
-		{
-		case VSJOIN:
-			if (!plci->relatedPTYPLCI ||
-			    (plci->ptyState != S_ECT && plci->relatedPTYPLCI->ptyState != S_ECT))
-			{ /* Error */
-				break;
-			}
-			/* remember all necessary information */
-			if (parms[i][0] != 11 || parms[i][8] != 3) /* Length Test */
-			{
-				break;
-			}
-			if (parms[i][2] == VSWITCH_IND && parms[i][9] == 1)
-			{   /* first indication after ECT-Request on Consultation Call */
-				plci->vswitchstate = parms[i][9];
-				parms[i][9] = 2; /* State */
-				/* now ask first Call to join */
-			}
-			else if (parms[i][2] == VSWITCH_REQ && parms[i][9] == 3)
-			{ /* Answer of VSWITCH_REQ from first Call */
-				plci->vswitchstate = parms[i][9];
-				/* tell the consultation call to join and pass
-				   the protocol capabilities of the first call */
-			}
-			else
-			{ /* Error */
-				break;
-			}
-			plci->vsprot = parms[i][10]; /* protocol */
-			plci->vsprotdialect = parms[i][11]; /* protocoldialect */
-			/* send join request to related PLCI */
-			parms[i][1] = VSWITCHIE;
-			parms[i][2] = VSWITCH_REQ;
-
-			plci->relatedPTYPLCI->command = 0;
-			plci->relatedPTYPLCI->internal_command = VSWITCH_REQ_PEND;
-			add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
-			sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
-			send_req(plci->relatedPTYPLCI);
-			break;
-		case VSTRANSPORT:
-		default:
-			if (plci->relatedPTYPLCI &&
-			    plci->vswitchstate == 3 &&
-			    plci->relatedPTYPLCI->vswitchstate == 3)
-			{
-				add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
-				sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
-				send_req(plci->relatedPTYPLCI);
-			}
-			break;
-		}
-		parms[i][0] = 0; /* kill it */
-	}
-}
-
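The vswitch_t layout documented at the top of VSwitchReqInd() is a small fixed header followed by parameters. A hypothetical standalone decoder for that layout (assuming little-endian words, which the comment itself does not spell out) might look like:

#include <stdint.h>
#include <stdio.h>

/* byte 0 length, byte 1 IE id, byte 2 REQ/IND, byte 3 reserved,
   bytes 4-5 command, bytes 6-7 return error, 8... params. */
struct vswitch_hdr {
	uint8_t  length;
	uint8_t  ie;
	uint8_t  req_ind;
	uint16_t command;
	uint16_t error;
};

static int vswitch_parse(const uint8_t *p, struct vswitch_hdr *h)
{
	if (p[0] < 7)                       /* too short to carry the header */
		return -1;
	h->length  = p[0];
	h->ie      = p[1];
	h->req_ind = p[2];
	h->command = (uint16_t)(p[4] | (p[5] << 8));
	h->error   = (uint16_t)(p[6] | (p[7] << 8));
	return 0;
}

int main(void)
{
	const uint8_t msg[] = { 11, 0x01, 0x02, 0, 0x01, 0x00, 0x00, 0x00, 3, 1, 0, 0 };
	struct vswitch_hdr h;

	if (!vswitch_parse(msg, &h))
		printf("cmd=%u err=%u len=%u\n",
		       (unsigned)h.command, (unsigned)h.error, (unsigned)h.length);
	return 0;
}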
-
-/*------------------------------------------------------------------*/
-
-static int diva_get_dma_descriptor(PLCI *plci, dword   *dma_magic) {
-	ENTITY e;
-	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
-	if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_RX_DMA)) {
-		return (-1);
-	}
-
-	pReq->xdi_dma_descriptor_operation.Req = 0;
-	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
-	pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_number  = -1;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_magic   = 0;
-
-	e.user[0] = plci->adapter->Id - 1;
-	plci->adapter->request((ENTITY *)pReq);
-
-	if (!pReq->xdi_dma_descriptor_operation.info.operation &&
-	    (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
-	    pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
-		*dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
-		dbug(3, dprintf("dma_alloc, a:%d (%d-%08x)",
-				plci->adapter->Id,
-				pReq->xdi_dma_descriptor_operation.info.descriptor_number,
-				*dma_magic));
-		return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
-	} else {
-		dbug(1, dprintf("dma_alloc failed"));
-		return (-1);
-	}
-}
-
-static void diva_free_dma_descriptor(PLCI *plci, int nr) {
-	ENTITY e;
-	IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
-	if (nr < 0) {
-		return;
-	}
-
-	pReq->xdi_dma_descriptor_operation.Req = 0;
-	pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
-	pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_number  = nr;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
-	pReq->xdi_dma_descriptor_operation.info.descriptor_magic   = 0;
-
-	e.user[0] = plci->adapter->Id - 1;
-	plci->adapter->request((ENTITY *)pReq);
-
-	if (!pReq->xdi_dma_descriptor_operation.info.operation) {
-		dbug(1, dprintf("dma_free(%d)", nr));
-	} else {
-		dbug(1, dprintf("dma_free failed (%d)", nr));
-	}
-}
-
-/*------------------------------------------------------------------*/
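diva_get_dma_descriptor()/diva_free_dma_descriptor() above follow a simple contract: allocation returns a descriptor number (or -1 on failure) together with a magic cookie, and free takes the number back and ignores invalid ones. A toy, purely illustrative pool with the same contract (none of these names are the driver's):

#include <stdint.h>
#include <stdio.h>

#define NR_DESCRIPTORS 8

static uint32_t desc_magic[NR_DESCRIPTORS];   /* 0 means "slot free" */

static int dma_desc_alloc(uint32_t *magic)
{
	int i;

	for (i = 0; i < NR_DESCRIPTORS; i++) {
		if (!desc_magic[i]) {
			desc_magic[i] = 0xD1000000u | (uint32_t)i; /* fake magic cookie */
			*magic = desc_magic[i];
			return i;
		}
	}
	return -1;                                 /* pool exhausted */
}

static void dma_desc_free(int nr)
{
	if (nr < 0 || nr >= NR_DESCRIPTORS)
		return;
	desc_magic[nr] = 0;
}

int main(void)
{
	uint32_t magic;
	int nr = dma_desc_alloc(&magic);

	if (nr >= 0) {
		printf("allocated %d, magic 0x%08x\n", nr, (unsigned)magic);
		dma_desc_free(nr);
	}
	return 0;
}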
diff --git a/drivers/isdn/hardware/eicon/mi_pc.h b/drivers/isdn/hardware/eicon/mi_pc.h
deleted file mode 100644
index 83e9ed8..0000000
--- a/drivers/isdn/hardware/eicon/mi_pc.h
+++ /dev/null
@@ -1,204 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/*----------------------------------------------------------------------------
-// MAESTRA ISA PnP */
-#define BRI_MEMORY_BASE                 0x1f700000
-#define BRI_MEMORY_SIZE                 0x00100000  /* 1MB on the BRI                         */
-#define BRI_SHARED_RAM_SIZE             0x00010000  /* 64k shared RAM                         */
-#define BRI_RAY_TAYLOR_DSP_CODE_SIZE    0x00020000  /* max 128k DSP-Code (Ray Taylor's code)  */
-#define BRI_ORG_MAX_DSP_CODE_SIZE       0x00050000  /* max 320k DSP-Code (Telindus)           */
-#define BRI_V90D_MAX_DSP_CODE_SIZE      0x00060000  /* max 384k DSP-Code if V.90D included    */
-#define BRI_CACHED_ADDR(x)              (((x) & 0x1fffffffL) | 0x80000000L)
-#define BRI_UNCACHED_ADDR(x)            (((x) & 0x1fffffffL) | 0xa0000000L)
-#define ADDR  4
-#define ADDRH 6
-#define DATA  0
-#define RESET 7
-#define DEFAULT_ADDRESS 0x240
-#define DEFAULT_IRQ     3
-#define M_PCI_ADDR   0x04  /* MAESTRA BRI PCI */
-#define M_PCI_ADDRH  0x0c  /* MAESTRA BRI PCI */
-#define M_PCI_DATA   0x00  /* MAESTRA BRI PCI */
-#define M_PCI_RESET  0x10  /* MAESTRA BRI PCI */
-/*----------------------------------------------------------------------------
-// MAESTRA PRI PCI */
-#define MP_IRQ_RESET                    0xc18       /* offset of isr in the CONFIG memory bar */
-#define MP_IRQ_RESET_VAL                0xfe        /* value to clear an interrupt            */
-#define MP_MEMORY_SIZE                  0x00400000  /* 4MB on standard PRI                    */
-#define MP2_MEMORY_SIZE                 0x00800000  /* 8MB on PRI Rev. 2                      */
-#define MP_SHARED_RAM_OFFSET            0x00001000  /* offset of shared RAM base in the DRAM memory bar */
-#define MP_SHARED_RAM_SIZE              0x00010000  /* 64k shared RAM                         */
-#define MP_PROTOCOL_OFFSET              (MP_SHARED_RAM_OFFSET + MP_SHARED_RAM_SIZE)
-#define MP_RAY_TAYLOR_DSP_CODE_SIZE     0x00040000  /* max 256k DSP-Code (Ray Taylor's code)  */
-#define MP_ORG_MAX_DSP_CODE_SIZE        0x00060000  /* max 384k DSP-Code (Telindus)           */
-#define MP_V90D_MAX_DSP_CODE_SIZE       0x00070000  /* max 448k DSP-Code if V.90D included)   */
-#define MP_VOIP_MAX_DSP_CODE_SIZE       0x00090000  /* max 576k DSP-Code if voice over IP included */
-#define MP_CACHED_ADDR(x)               (((x) & 0x1fffffffL) | 0x80000000L)
-#define MP_UNCACHED_ADDR(x)             (((x) & 0x1fffffffL) | 0xa0000000L)
-#define MP_RESET         0x20        /* offset of RESET register in the DEVICES memory bar */
-/* RESET register bits */
-#define _MP_S2M_RESET    0x10        /* active lo   */
-#define _MP_LED2         0x08        /* 1 = on      */
-#define _MP_LED1         0x04        /* 1 = on      */
-#define _MP_DSP_RESET    0x02        /* active lo   */
-#define _MP_RISC_RESET   0x81        /* active hi, bit 7 for compatibility with old boards */
-/* CPU exception context structure in MP shared ram after trap */
-typedef struct mp_xcptcontext_s MP_XCPTC;
-struct mp_xcptcontext_s {
-	dword       sr;
-	dword       cr;
-	dword       epc;
-	dword       vaddr;
-	dword       regs[32];
-	dword       mdlo;
-	dword       mdhi;
-	dword       reseverd;
-	dword       xclass;
-};
-/* boot interface structure for PRI */
-struct mp_load {
-	dword     volatile cmd;
-	dword     volatile addr;
-	dword     volatile len;
-	dword     volatile err;
-	dword     volatile live;
-	dword     volatile res1[0x1b];
-	dword     volatile TrapId;    /* has value 0x999999XX on a CPU trap */
-	dword     volatile res2[0x03];
-	MP_XCPTC  volatile xcpt;      /* contains register dump */
-	dword     volatile rest[((0x1020 >> 2) - 6) - 0x1b - 1 - 0x03 - (sizeof(MP_XCPTC) >> 2)];
-	dword     volatile signature;
-	dword data[60000]; /* real interface description */
-};
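The size of the rest[] member above is chosen so that the signature and the real interface description land at a fixed offset in the boot interface. A standalone copy of the two structures (volatile dropped, names cleaned up, struct names my own) can be used to check that arithmetic; the 0x1020 constant is taken from the expression itself:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dword;

struct xcpt { dword sr, cr, epc, vaddr, regs[32], mdlo, mdhi, reserved, xclass; };

struct mp_load_check {
	dword cmd, addr, len, err, live;
	dword res1[0x1b];
	dword TrapId;
	dword res2[0x03];
	struct xcpt xcpt;
	dword rest[((0x1020 >> 2) - 6) - 0x1b - 1 - 0x03 - (sizeof(struct xcpt) >> 2)];
	dword signature;
	dword data[60000];
};

int main(void)
{
	/* the data area starts right after the 0x1020-byte boot header */
	assert(offsetof(struct mp_load_check, data) == 0x1020);
	printf("signature at 0x%zx, data at 0x%zx\n",
	       offsetof(struct mp_load_check, signature),
	       offsetof(struct mp_load_check, data));
	return 0;
}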
-/*----------------------------------------------------------------------------*/
-/* SERVER 4BRI (Quattro PCI)                                                  */
-#define MQ_BOARD_REG_OFFSET             0x800000    /* PC relative On board registers offset  */
-#define MQ_BREG_RISC                    0x1200      /* RISC Reset etc                         */
-#define MQ_RISC_COLD_RESET_MASK         0x0001      /* RISC Cold reset                        */
-#define MQ_RISC_WARM_RESET_MASK         0x0002      /* RISC Warm reset                        */
-#define MQ_BREG_IRQ_TEST                0x0608      /* Interrupt request, no CPU interaction  */
-#define MQ_IRQ_REQ_ON                   0x1
-#define MQ_IRQ_REQ_OFF                  0x0
-#define MQ_BOARD_DSP_OFFSET             0xa00000    /* PC relative On board DSP regs offset   */
-#define MQ_DSP1_ADDR_OFFSET             0x0008      /* Addr register offset DSP 1 subboard 1  */
-#define MQ_DSP2_ADDR_OFFSET             0x0208      /* Addr register offset DSP 2 subboard 1  */
-#define MQ_DSP1_DATA_OFFSET             0x0000      /* Data register offset DSP 1 subboard 1  */
-#define MQ_DSP2_DATA_OFFSET             0x0200      /* Data register offset DSP 2 subboard 1  */
-#define MQ_DSP_JUNK_OFFSET              0x0400      /* DSP Data/Addr regs subboard offset     */
-#define MQ_ISAC_DSP_RESET               0x0028      /* ISAC and DSP reset address offset      */
-#define MQ_BOARD_ISAC_DSP_RESET         0x800028    /* ISAC and DSP reset address offset      */
-#define MQ_INSTANCE_COUNT               4           /* 4BRI consists of four instances        */
-#define MQ_MEMORY_SIZE                  0x00400000  /* 4MB on standard 4BRI                   */
-#define MQ_CTRL_SIZE                    0x00002000  /* 8K memory mapped registers             */
-#define MQ_SHARED_RAM_SIZE              0x00010000  /* 64k shared RAM                         */
-#define MQ_ORG_MAX_DSP_CODE_SIZE        0x00050000  /* max 320k DSP-Code (Telindus) */
-#define MQ_V90D_MAX_DSP_CODE_SIZE       0x00060000  /* max 384K DSP-Code if V.90D included */
-#define MQ_VOIP_MAX_DSP_CODE_SIZE       0x00028000  /* max 4*160k = 640K DSP-Code if voice over IP included */
-#define MQ_CACHED_ADDR(x)               (((x) & 0x1fffffffL) | 0x80000000L)
-#define MQ_UNCACHED_ADDR(x)             (((x) & 0x1fffffffL) | 0xa0000000L)
-/*--------------------------------------------------------------------------------------------*/
-/* Additional definitions reflecting the different address map of the  SERVER 4BRI V2          */
-#define MQ2_BREG_RISC                   0x0200      /* RISC Reset etc                         */
-#define MQ2_BREG_IRQ_TEST               0x0400      /* Interrupt request, no CPU interaction  */
-#define MQ2_BOARD_DSP_OFFSET            0x800000    /* PC relative On board DSP regs offset   */
-#define MQ2_DSP1_DATA_OFFSET            0x1800      /* Data register offset DSP 1 subboard 1  */
-#define MQ2_DSP1_ADDR_OFFSET            0x1808      /* Addr register offset DSP 1 subboard 1  */
-#define MQ2_DSP2_DATA_OFFSET            0x1810      /* Data register offset DSP 2 subboard 1  */
-#define MQ2_DSP2_ADDR_OFFSET            0x1818      /* Addr register offset DSP 2 subboard 1  */
-#define MQ2_DSP_JUNK_OFFSET             0x1000      /* DSP Data/Addr regs subboard offset     */
-#define MQ2_ISAC_DSP_RESET              0x0000      /* ISAC and DSP reset address offset      */
-#define MQ2_BOARD_ISAC_DSP_RESET        0x800000    /* ISAC and DSP reset address offset      */
-#define MQ2_IPACX_CONFIG                0x0300      /* IPACX Configuration TE(0)/NT(1)        */
-#define MQ2_BOARD_IPACX_CONFIG          0x800300    /*     ""                                 */
-#define MQ2_MEMORY_SIZE                 0x01000000  /* 16MB code/data memory                  */
-#define MQ2_CTRL_SIZE                   0x00008000  /* 32K memory mapped registers            */
-/*----------------------------------------------------------------------------*/
-/* SERVER BRI 2M/2F as derived from 4BRI V2                                   */
-#define BRI2_MEMORY_SIZE                0x00800000  /* 8MB code/data memory                   */
-#define BRI2_PROTOCOL_MEMORY_SIZE       (MQ2_MEMORY_SIZE >> 2) /*  same as one 4BRI Rev.2 task */
-#define BRI2_CTRL_SIZE                  0x00008000  /* 32K memory mapped registers            */
-#define M_INSTANCE_COUNT                1           /*  BRI consists of one instance          */
-/*
- * Some useful constants for proper initialization of the GT6401x
- */
-#define ID_REG        0x0000      /*PCI reg - contains the Dev&Ven ID of the card*/
-#define RAS0_BASEREG  0x0010      /*Ras0 register - contains the base addr Ras0*/
-#define RAS2_BASEREG  0x0014
-#define CS_BASEREG    0x0018
-#define BOOT_BASEREG  0x001c
-#define GTREGS_BASEREG 0x0024   /*GTRegsBase reg - contains the base addr where*/
-				/*the GT64010 internal regs were mapped     */
-/*
- *  GT64010 internal registers
- */
-/* DRAM device coding  */
-#define LOW_RAS0_DREG 0x0400    /*Ras0 low decode address*/
-#define HI_RAS0_DREG  0x0404    /*Ras0 high decode address*/
-#define LOW_RAS1_DREG 0x0408    /*Ras1 low decode address*/
-#define HI_RAS1_DREG  0x040c    /*Ras1 high decode address*/
-#define LOW_RAS2_DREG 0x0410    /*Ras2 low decode address*/
-#define HI_RAS2_DREG  0x0414    /*Ras2 high decode address*/
-#define LOW_RAS3_DREG 0x0418    /*Ras3 low decode address*/
-#define HI_RAS3_DREG  0x041c    /*Ras3 high decode address*/
-/* I/O CS device coding  */
-#define LOW_CS0_DREG  0x0420 /* CS0* low decode register */
-#define HI_CS0_DREG   0x0424 /* CS0* high decode register */
-#define LOW_CS1_DREG  0x0428 /* CS1* low decode register */
-#define HI_CS1_DREG   0x042c /* CS1* high decode register */
-#define LOW_CS2_DREG  0x0430 /* CS2* low decode register */
-#define HI_CS2_DREG   0x0434 /* CS2* high decode register */
-#define LOW_CS3_DREG  0x0438 /* CS3* low decode register */
-#define HI_CS3_DREG   0x043c /* CS3* high decode register */
-/* Boot PROM device coding */
-#define LOW_BOOTCS_DREG 0x0440 /* Boot CS low decode register */
-#define HI_BOOTCS_DREG 0x0444 /* Boot CS High decode register */
-/* DRAM group coding (for CPU)  */
-#define LO_RAS10_GREG 0x0008    /*Ras1..0 group low decode address*/
-#define HI_RAS10_GREG 0x0010    /*Ras1..0 group high decode address*/
-#define LO_RAS32_GREG 0x0018    /*Ras3..2 group low decode address  */
-#define HI_RAS32_GREG 0x0020    /*Ras3..2 group high decode address  */
-/* I/O CS group coding for (CPU)  */
-#define LO_CS20_GREG  0x0028 /* CS2..0 group low decode register */
-#define HI_CS20_GREG  0x0030 /* CS2..0 group high decode register */
-#define LO_CS3B_GREG  0x0038 /* CS3 & PROM group low decode register */
-#define HI_CS3B_GREG  0x0040 /* CS3 & PROM group high decode register */
-/* Galileo specific PCI config. */
-#define PCI_TIMEOUT_RET 0x0c04 /* Time Out and retry register */
-#define RAS10_BANKSIZE 0x0c08 /* RAS 1..0 group PCI bank size */
-#define RAS32_BANKSIZE 0x0c0c /* RAS 3..2 group PCI bank size */
-#define CS20_BANKSIZE 0x0c10 /* CS 2..0 group PCI bank size */
-#define CS3B_BANKSIZE 0x0c14 /* CS 3 & Boot group PCI bank size */
-#define DRAM_SIZE     0x0001      /*Dram size in mega bytes*/
-#define PROM_SIZE     0x08000     /*Prom size in bytes*/
-/*--------------------------------------------------------------------------*/
-#define OFFS_DIVA_INIT_TASK_COUNT 0x68
-#define OFFS_DSP_CODE_BASE_ADDR   0x6c
-#define OFFS_XLOG_BUF_ADDR        0x70
-#define OFFS_XLOG_COUNT_ADDR      0x74
-#define OFFS_XLOG_OUT_ADDR        0x78
-#define OFFS_PROTOCOL_END_ADDR    0x7c
-#define OFFS_PROTOCOL_ID_STRING   0x80
-/*--------------------------------------------------------------------------*/
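Several of the constants above (BRI_CACHED_ADDR, MP_CACHED_ADDR, MQ_CACHED_ADDR and their UNCACHED counterparts) share the same arithmetic: mask the address down to 29 bits and place it into the MIPS KSEG0 (cached, 0x80000000) or KSEG1 (uncached, 0xa0000000) window. A small standalone illustration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t cached_addr(uint32_t x)   { return (x & 0x1fffffffu) | 0x80000000u; }
static uint32_t uncached_addr(uint32_t x) { return (x & 0x1fffffffu) | 0xa0000000u; }

int main(void)
{
	uint32_t phys = 0x1f700000u;            /* BRI_MEMORY_BASE from above */

	printf("cached:   0x%08x\n", (unsigned)cached_addr(phys));   /* 0x9f700000 */
	printf("uncached: 0x%08x\n", (unsigned)uncached_addr(phys)); /* 0xbf700000 */
	return 0;
}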
diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
deleted file mode 100644
index 1cd9aff..0000000
--- a/drivers/isdn/hardware/eicon/mntfunc.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/* $Id: mntfunc.c,v 1.19.6.4 2005/01/31 12:22:20 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * Maint module
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "debug_if.h"
-
-extern char *DRIVERRELEASE_MNT;
-
-#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern void DIVA_DIDD_Read(void *, int);
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-static DESCRIPTOR MaintDescriptor =
-{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };
-
-extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
-				const void *src, int length);
-extern int diva_os_copy_from_user(void *os_handle, void *dst,
-				  const void __user *src, int length);
-
-static void no_printf(unsigned char *x, ...)
-{
-	/* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- *  DIDD callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
-			   int removal)
-{
-	if (adapter->type == IDI_DADAPTER) {
-		DBG_ERR(("cb: Change in DAdapter ? Oops ?."));
-	} else if (adapter->type == IDI_DIMAINT) {
-		if (removal) {
-			DbgDeregister();
-			memset(&MAdapter, 0, sizeof(MAdapter));
-			dprintf = no_printf;
-		} else {
-			memcpy(&MAdapter, adapter, sizeof(MAdapter));
-			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
-			DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT);
-		}
-	} else if ((adapter->type > 0) && (adapter->type < 16)) {
-		if (removal) {
-			diva_mnt_remove_xdi_adapter(adapter);
-		} else {
-			diva_mnt_add_xdi_adapter(adapter);
-		}
-	}
-	return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
-	int x = 0;
-	int dadapter = 0;
-	IDI_SYNC_REQ req;
-	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
-	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
-	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
-			dadapter = 1;
-			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
-			req.didd_notify.e.Req = 0;
-			req.didd_notify.e.Rc =
-				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
-			req.didd_notify.info.callback = (void *)didd_callback;
-			req.didd_notify.info.context = NULL;
-			DAdapter.request((ENTITY *)&req);
-			if (req.didd_notify.e.Rc != 0xff)
-				return (0);
-			notify_handle = req.didd_notify.info.handle;
-			/* Register MAINT (me) */
-			req.didd_add_adapter.e.Req = 0;
-			req.didd_add_adapter.e.Rc =
-				IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
-			req.didd_add_adapter.info.descriptor =
-				(void *) &MaintDescriptor;
-			DAdapter.request((ENTITY *)&req);
-			if (req.didd_add_adapter.e.Rc != 0xff)
-				return (0);
-		} else if ((DIDD_Table[x].type > 0)
-			   && (DIDD_Table[x].type < 16)) {
-			diva_mnt_add_xdi_adapter(&DIDD_Table[x]);
-		}
-	}
-	return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void __exit disconnect_didd(void)
-{
-	IDI_SYNC_REQ req;
-
-	req.didd_notify.e.Req = 0;
-	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
-	req.didd_notify.info.handle = notify_handle;
-	DAdapter.request((ENTITY *)&req);
-
-	req.didd_remove_adapter.e.Req = 0;
-	req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
-	req.didd_remove_adapter.info.p_request =
-		(IDI_CALL) MaintDescriptor.request;
-	DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * read/write maint
- */
-int maint_read_write(void __user *buf, int count)
-{
-	byte data[128];
-	dword cmd, id, mask;
-	int ret = 0;
-
-	if (count < (3 * sizeof(dword)))
-		return (-EFAULT);
-
-	if (diva_os_copy_from_user(NULL, (void *) &data[0],
-				   buf, 3 * sizeof(dword))) {
-		return (-EFAULT);
-	}
-
-	cmd = *(dword *)&data[0];	/* command */
-	id = *(dword *)&data[4];	/* driver id */
-	mask = *(dword *)&data[8];	/* mask or size */
-
-	switch (cmd) {
-	case DITRACE_CMD_GET_DRIVER_INFO:
-		if ((ret = diva_get_driver_info(id, data, sizeof(data))) > 0) {
-			if ((count < ret) || diva_os_copy_to_user
-			    (NULL, buf, (void *) &data[0], ret))
-				ret = -EFAULT;
-		} else {
-			ret = -EINVAL;
-		}
-		break;
-
-	case DITRACE_READ_DRIVER_DBG_MASK:
-		if ((ret = diva_get_driver_dbg_mask(id, (byte *) data)) > 0) {
-			if ((count < ret) || diva_os_copy_to_user
-			    (NULL, buf, (void *) &data[0], ret))
-				ret = -EFAULT;
-		} else {
-			ret = -ENODEV;
-		}
-		break;
-
-	case DITRACE_WRITE_DRIVER_DBG_MASK:
-		if ((ret = diva_set_driver_dbg_mask(id, mask)) <= 0) {
-			ret = -ENODEV;
-		}
-		break;
-
-		/*
-		  Filter commands will ignore the ID due to the fact that filtering
-		  affects the B-channel and Audio Tap trace levels only. Also, the MAINT
-		  driver will select the right trace ID by itself.
-		*/
-	case DITRACE_WRITE_SELECTIVE_TRACE_FILTER:
-		if (!mask) {
-			ret = diva_set_trace_filter(1, "*");
-		} else if (mask < sizeof(data)) {
-			if (diva_os_copy_from_user(NULL, data, (char __user *)buf + 12, mask)) {
-				ret = -EFAULT;
-			} else {
-				ret = diva_set_trace_filter((int)mask, data);
-			}
-		} else {
-			ret = -EINVAL;
-		}
-		break;
-
-	case DITRACE_READ_SELECTIVE_TRACE_FILTER:
-		if ((ret = diva_get_trace_filter(sizeof(data), data)) > 0) {
-			if (diva_os_copy_to_user(NULL, buf, data, ret))
-				ret = -EFAULT;
-		} else {
-			ret = -ENODEV;
-		}
-		break;
-
-	case DITRACE_READ_TRACE_ENTRY:{
-		diva_os_spin_lock_magic_t old_irql;
-		word size;
-		diva_dbg_entry_head_t *pmsg;
-		byte *pbuf;
-
-		if (!(pbuf = diva_os_malloc(0, mask))) {
-			return (-ENOMEM);
-		}
-
-		for (;;) {
-			if (!(pmsg =
-			      diva_maint_get_message(&size, &old_irql))) {
-				break;
-			}
-			if (size > mask) {
-				diva_maint_ack_message(0, &old_irql);
-				ret = -EINVAL;
-				break;
-			}
-			ret = size;
-			memcpy(pbuf, pmsg, size);
-			diva_maint_ack_message(1, &old_irql);
-			if ((count < size) ||
-			    diva_os_copy_to_user(NULL, buf, (void *) pbuf, size))
-				ret = -EFAULT;
-			break;
-		}
-		diva_os_free(0, pbuf);
-	}
-		break;
-
-	case DITRACE_READ_TRACE_ENTRYS:{
-		diva_os_spin_lock_magic_t old_irql;
-		word size;
-		diva_dbg_entry_head_t *pmsg;
-		byte *pbuf = NULL;
-		int written = 0;
-
-		if (mask < 4096) {
-			ret = -EINVAL;
-			break;
-		}
-		if (!(pbuf = diva_os_malloc(0, mask))) {
-			return (-ENOMEM);
-		}
-
-		for (;;) {
-			if (!(pmsg =
-			      diva_maint_get_message(&size, &old_irql))) {
-				break;
-			}
-			if ((size + 8) > mask) {
-				diva_maint_ack_message(0, &old_irql);
-				break;
-			}
-			/*
-			  Write entry length
-			*/
-			pbuf[written++] = (byte) size;
-			pbuf[written++] = (byte) (size >> 8);
-			pbuf[written++] = 0;
-			pbuf[written++] = 0;
-			/*
-			  Write message
-			*/
-			memcpy(&pbuf[written], pmsg, size);
-			diva_maint_ack_message(1, &old_irql);
-			written += size;
-			mask -= (size + 4);
-		}
-		pbuf[written++] = 0;
-		pbuf[written++] = 0;
-		pbuf[written++] = 0;
-		pbuf[written++] = 0;
-
-		if ((count < written) || diva_os_copy_to_user(NULL, buf, (void *) pbuf, written)) {
-			ret = -EFAULT;
-		} else {
-			ret = written;
-		}
-		diva_os_free(0, pbuf);
-	}
-		break;
-
-	default:
-		ret = -EINVAL;
-	}
-	return (ret);
-}
-
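The DITRACE_READ_TRACE_ENTRYS branch above returns trace records framed as a 16-bit little-endian length, two zero bytes and the payload, with four zero bytes terminating the stream. A self-contained walker over that framing (illustrative only, not the driver's API):

#include <stdint.h>
#include <stdio.h>

static void walk_entries(const uint8_t *buf, size_t buflen)
{
	size_t pos = 0;

	while (pos + 4 <= buflen) {
		uint16_t len = (uint16_t)(buf[pos] | (buf[pos + 1] << 8));

		pos += 4;
		if (!len)
			break;                       /* 4 zero bytes: end of stream */
		if (pos + len > buflen)
			break;                       /* truncated record */
		printf("entry of %u bytes at offset %zu\n", (unsigned)len, pos);
		pos += len;
	}
}

int main(void)
{
	/* one 3-byte entry ("abc") followed by the terminator */
	const uint8_t buf[] = { 3, 0, 0, 0, 'a', 'b', 'c', 0, 0, 0, 0 };

	walk_entries(buf, sizeof(buf));
	return 0;
}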
-/*
- *  init
- */
-int __init mntfunc_init(int *buffer_length, void **buffer,
-				    unsigned long diva_dbg_mem)
-{
-	if (*buffer_length < 64) {
-		*buffer_length = 64;
-	}
-	if (*buffer_length > 512) {
-		*buffer_length = 512;
-	}
-	*buffer_length *= 1024;
-
-	if (diva_dbg_mem) {
-		*buffer = (void *) diva_dbg_mem;
-	} else {
-		while ((*buffer_length >= (64 * 1024))
-		       &&
-		       (!(*buffer = diva_os_malloc(0, *buffer_length)))) {
-			*buffer_length -= 1024;
-		}
-
-		if (!*buffer) {
-			DBG_ERR(("init: Can not alloc trace buffer"));
-			return (0);
-		}
-	}
-
-	if (diva_maint_init(*buffer, *buffer_length, (diva_dbg_mem == 0))) {
-		if (!diva_dbg_mem) {
-			diva_os_free(0, *buffer);
-		}
-		DBG_ERR(("init: maint init failed"));
-		return (0);
-	}
-
-	if (!connect_didd()) {
-		DBG_ERR(("init: failed to connect to DIDD."));
-		diva_maint_finit();
-		if (!diva_dbg_mem) {
-			diva_os_free(0, *buffer);
-		}
-		return (0);
-	}
-	return (1);
-}
-
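mntfunc_init() above sizes the trace buffer by clamping the request to 64..512 KB and then retrying smaller allocations in 1 KB steps until it either succeeds or drops below 64 KB. A standalone sketch of that strategy, with malloc standing in for diva_os_malloc:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_trace_buffer(int *length)
{
	void *buf = NULL;

	if (*length < 64)
		*length = 64;
	if (*length > 512)
		*length = 512;
	*length *= 1024;                     /* KB -> bytes, as in the driver */

	while (*length >= 64 * 1024 && !(buf = malloc(*length)))
		*length -= 1024;             /* retry with a smaller buffer */

	return buf;
}

int main(void)
{
	int length = 1024;                   /* request 1024 KB: clamped to 512 */
	void *buf = alloc_trace_buffer(&length);

	printf("trace buffer: %d bytes at %p\n", buf ? length : 0, buf);
	free(buf);
	return 0;
}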
-/*
- *  exit
- */
-void __exit mntfunc_finit(void)
-{
-	void *buffer;
-	int i = 100;
-
-	DbgDeregister();
-
-	while (diva_mnt_shutdown_xdi_adapters() && i--) {
-		diva_os_sleep(10);
-	}
-
-	disconnect_didd();
-
-	if ((buffer = diva_maint_finit())) {
-		diva_os_free(0, buffer);
-	}
-
-	memset(&MAdapter, 0, sizeof(MAdapter));
-	dprintf = no_printf;
-}
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
deleted file mode 100644
index 87db5f4..0000000
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ /dev/null
@@ -1,1132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_4bri.c,v 1.28.4.4 2005/02/11 19:40:25 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_4bri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "dsrv4bri.h"
-#include "helpers.h"
-
-static void *diva_xdiLoadFileFile = NULL;
-static dword diva_xdiLoadFileLength = 0;
-
-/*
-**  IMPORTS
-*/
-extern void prepare_qBri_functions(PISDN_ADAPTER IoAdapter);
-extern void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-extern void diva_add_slave_adapter(diva_os_xdi_adapter_t *a);
-
-extern int qBri_FPGA_download(PISDN_ADAPTER IoAdapter);
-extern void start_qBri_hardware(PISDN_ADAPTER IoAdapter);
-
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-**  LOCALS
-*/
-static unsigned long _4bri_bar_length[4] = {
-	0x100,
-	0x100,			/* I/O */
-	MQ_MEMORY_SIZE,
-	0x2000
-};
-static unsigned long _4bri_v2_bar_length[4] = {
-	0x100,
-	0x100,			/* I/O */
-	MQ2_MEMORY_SIZE,
-	0x10000
-};
-static unsigned long _4bri_v2_bri_bar_length[4] = {
-	0x100,
-	0x100,			/* I/O */
-	BRI2_MEMORY_SIZE,
-	0x10000
-};
-
-
-static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-				   diva_xdi_um_cfg_cmd_t *cmd,
-				   int length);
-static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a);
-static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a,
-				      byte *data, dword length);
-static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter);
-static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
-				       dword address,
-				       const byte *data,
-				       dword length, dword limit);
-static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
-				   dword start_address, dword features);
-static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter);
-static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a);
-
-static int _4bri_is_rev_2_card(int card_ordinal)
-{
-	switch (card_ordinal) {
-	case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
-	case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
-	case CARDTYPE_DIVASRV_B_2M_V2_PCI:
-	case CARDTYPE_DIVASRV_B_2F_PCI:
-	case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
-		return (1);
-	}
-	return (0);
-}
-
-static int _4bri_is_rev_2_bri_card(int card_ordinal)
-{
-	switch (card_ordinal) {
-	case CARDTYPE_DIVASRV_B_2M_V2_PCI:
-	case CARDTYPE_DIVASRV_B_2F_PCI:
-	case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
-		return (1);
-	}
-	return (0);
-}
-
-static void diva_4bri_set_addresses(diva_os_xdi_adapter_t *a)
-{
-	dword offset = a->resources.pci.qoffset;
-	dword c_offset = offset * a->xdi_adapter.ControllerNumber;
-
-	a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 0;
-	a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 3;
-	a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 0;
-
-	/*
-	  Set up hardware related pointers
-	*/
-	a->xdi_adapter.Address = a->resources.pci.addr[2];	/* BAR2 SDRAM  */
-	a->xdi_adapter.Address += c_offset;
-
-	a->xdi_adapter.Control = a->resources.pci.addr[2];	/* BAR2 SDRAM  */
-
-	a->xdi_adapter.ram = a->resources.pci.addr[2];	/* BAR2 SDRAM  */
-	a->xdi_adapter.ram += c_offset + (offset - MQ_SHARED_RAM_SIZE);
-
-	a->xdi_adapter.reset = a->resources.pci.addr[0];	/* BAR0 CONFIG */
-	/*
-	  ctlReg contains the register address for the MIPS CPU reset control
-	*/
-	a->xdi_adapter.ctlReg = a->resources.pci.addr[3];	/* BAR3 CNTRL  */
-	/*
-	  prom contains the register address for FPGA and EEPROM programming
-	*/
-	a->xdi_adapter.prom = &a->xdi_adapter.reset[0x6E];
-}
-
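diva_4bri_set_addresses() above places every controller at its own offset inside the BAR2 SDRAM window: controller i starts at i * qoffset and its shared RAM occupies the last 64k of that slice. A standalone sketch of the offset arithmetic, with the sizes taken from mi_pc.h and the four-task case assumed:

#include <stdint.h>
#include <stdio.h>

#define MQ_MEMORY_SIZE      0x00400000u  /* 4MB SDRAM (BAR2)              */
#define MQ_SHARED_RAM_SIZE  0x00010000u  /* 64k shared RAM per controller */

int main(void)
{
	uint32_t qoffset = MQ_MEMORY_SIZE >> 2;  /* factor 2 for four tasks */
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t base = qoffset * (uint32_t)i;                 /* Address offset */
		uint32_t ram  = base + (qoffset - MQ_SHARED_RAM_SIZE); /* shared RAM     */

		printf("controller %d: window 0x%08x, shared RAM 0x%08x\n",
		       i, (unsigned)base, (unsigned)ram);
	}
	return 0;
}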
-/*
-**  BAR0 - MEM - 0x100    - CONFIG MEM
-**  BAR1 - I/O - 0x100    - UNUSED
-**  BAR2 - MEM - MQ_MEMORY_SIZE (MQ2_MEMORY_SIZE on Rev.2) - SDRAM
-**  BAR3 - MEM - 0x2000 (0x10000 on Rev.2)   - CNTRL
-**
-**  Called by the master adapter, which will initialize and add the slave adapters
-*/
-int diva_4bri_init_card(diva_os_xdi_adapter_t *a)
-{
-	int bar, i;
-	byte __iomem *p;
-	PADAPTER_LIST_ENTRY quadro_list;
-	diva_os_xdi_adapter_t *diva_current;
-	diva_os_xdi_adapter_t *adapter_list[4];
-	PISDN_ADAPTER Slave;
-	unsigned long bar_length[ARRAY_SIZE(_4bri_bar_length)];
-	int v2 = _4bri_is_rev_2_card(a->CardOrdinal);
-	int tasks = _4bri_is_rev_2_bri_card(a->CardOrdinal) ? 1 : MQ_INSTANCE_COUNT;
-	int factor = (tasks == 1) ? 1 : 2;
-
-	if (v2) {
-		if (_4bri_is_rev_2_bri_card(a->CardOrdinal)) {
-			memcpy(bar_length, _4bri_v2_bri_bar_length,
-			       sizeof(bar_length));
-		} else {
-			memcpy(bar_length, _4bri_v2_bar_length,
-			       sizeof(bar_length));
-		}
-	} else {
-		memcpy(bar_length, _4bri_bar_length, sizeof(bar_length));
-	}
-	DBG_TRC(("SDRAM_LENGTH=%08x, tasks=%d, factor=%d",
-		 bar_length[2], tasks, factor))
-
-		/*
-		  Get Serial Number
-		  The serial number of the 4BRI is accessible, in accordance with the
-		  PCI spec, via a command register located in configuration space, so
-		  we do not have to map any BAR before we can access it.
-		*/
-		if (!_4bri_get_serial_number(a)) {
-			DBG_ERR(("A: 4BRI can't get Serial Number"))
-				diva_4bri_cleanup_adapter(a);
-			return (-1);
-		}
-
-	/*
-	  Set properties
-	*/
-	a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
-	DBG_LOG(("Load %s, SN:%ld, bus:%02x, func:%02x",
-		 a->xdi_adapter.Properties.Name,
-		 a->xdi_adapter.serialNo,
-		 a->resources.pci.bus, a->resources.pci.func))
-
-		/*
-		  First initialization step: get and check hardware resources.
-		  Do not map resources and do not access the card at this step
-		*/
-		for (bar = 0; bar < 4; bar++) {
-			a->resources.pci.bar[bar] =
-				divasa_get_pci_bar(a->resources.pci.bus,
-						   a->resources.pci.func, bar,
-						   a->resources.pci.hdev);
-			if (!a->resources.pci.bar[bar]
-			    || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) {
-				DBG_ERR(
-					("A: invalid bar[%d]=%08x", bar,
-					 a->resources.pci.bar[bar]))
-					return (-1);
-			}
-		}
-	a->resources.pci.irq =
-		(byte) divasa_get_pci_irq(a->resources.pci.bus,
-					  a->resources.pci.func,
-					  a->resources.pci.hdev);
-	if (!a->resources.pci.irq) {
-		DBG_ERR(("A: invalid irq"));
-		return (-1);
-	}
-
-	a->xdi_adapter.sdram_bar = a->resources.pci.bar[2];
-
-	/*
-	  Map all MEMORY BAR's
-	*/
-	for (bar = 0; bar < 4; bar++) {
-		if (bar != 1) {	/* ignore I/O */
-			a->resources.pci.addr[bar] =
-				divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar],
-						     bar_length[bar]);
-			if (!a->resources.pci.addr[bar]) {
-				DBG_ERR(("A: 4BRI: can't map bar[%d]", bar))
-					diva_4bri_cleanup_adapter(a);
-				return (-1);
-			}
-		}
-	}
-
-	/*
-	  Register I/O port
-	*/
-	sprintf(&a->port_name[0], "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo);
-
-	if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1],
-				     bar_length[1], &a->port_name[0], 1)) {
-		DBG_ERR(("A: 4BRI: can't register bar[1]"))
-			diva_4bri_cleanup_adapter(a);
-		return (-1);
-	}
-
-	a->resources.pci.addr[1] =
-		(void *) (unsigned long) a->resources.pci.bar[1];
-
-	/*
-	  Set cleanup pointer for base adapter only, so slave adapter
-	  will be unable to get cleanup
-	*/
-	a->interface.cleanup_adapter_proc = diva_4bri_cleanup_adapter;
-
-	/*
-	  Create slave adapters
-	*/
-	if (tasks > 1) {
-		if (!(a->slave_adapters[0] =
-		      (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
-		{
-			diva_4bri_cleanup_adapter(a);
-			return (-1);
-		}
-		if (!(a->slave_adapters[1] =
-		      (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
-		{
-			diva_os_free(0, a->slave_adapters[0]);
-			a->slave_adapters[0] = NULL;
-			diva_4bri_cleanup_adapter(a);
-			return (-1);
-		}
-		if (!(a->slave_adapters[2] =
-		      (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
-		{
-			diva_os_free(0, a->slave_adapters[0]);
-			diva_os_free(0, a->slave_adapters[1]);
-			a->slave_adapters[0] = NULL;
-			a->slave_adapters[1] = NULL;
-			diva_4bri_cleanup_adapter(a);
-			return (-1);
-		}
-		memset(a->slave_adapters[0], 0x00, sizeof(*a));
-		memset(a->slave_adapters[1], 0x00, sizeof(*a));
-		memset(a->slave_adapters[2], 0x00, sizeof(*a));
-	}
-
-	adapter_list[0] = a;
-	adapter_list[1] = a->slave_adapters[0];
-	adapter_list[2] = a->slave_adapters[1];
-	adapter_list[3] = a->slave_adapters[2];
-
-	/*
-	  Allocate slave list
-	*/
-	quadro_list =
-		(PADAPTER_LIST_ENTRY) diva_os_malloc(0, sizeof(*quadro_list));
-	if (!(a->slave_list = quadro_list)) {
-		for (i = 0; i < (tasks - 1); i++) {
-			diva_os_free(0, a->slave_adapters[i]);
-			a->slave_adapters[i] = NULL;
-		}
-		diva_4bri_cleanup_adapter(a);
-		return (-1);
-	}
-	memset(quadro_list, 0x00, sizeof(*quadro_list));
-
-	/*
-	  Set interfaces
-	*/
-	a->xdi_adapter.QuadroList = quadro_list;
-	for (i = 0; i < tasks; i++) {
-		adapter_list[i]->xdi_adapter.ControllerNumber = i;
-		adapter_list[i]->xdi_adapter.tasks = tasks;
-		quadro_list->QuadroAdapter[i] =
-			&adapter_list[i]->xdi_adapter;
-	}
-
-	for (i = 0; i < tasks; i++) {
-		diva_current = adapter_list[i];
-
-		diva_current->dsp_mask = 0x00000003;
-
-		diva_current->xdi_adapter.a.io =
-			&diva_current->xdi_adapter;
-		diva_current->xdi_adapter.DIRequest = request;
-		diva_current->interface.cmd_proc = diva_4bri_cmd_card_proc;
-		diva_current->xdi_adapter.Properties =
-			CardProperties[a->CardOrdinal];
-		diva_current->CardOrdinal = a->CardOrdinal;
-
-		diva_current->xdi_adapter.Channels =
-			CardProperties[a->CardOrdinal].Channels;
-		diva_current->xdi_adapter.e_max =
-			CardProperties[a->CardOrdinal].E_info;
-		diva_current->xdi_adapter.e_tbl =
-			diva_os_malloc(0,
-				       diva_current->xdi_adapter.e_max *
-				       sizeof(E_INFO));
-
-		if (!diva_current->xdi_adapter.e_tbl) {
-			diva_4bri_cleanup_slave_adapters(a);
-			diva_4bri_cleanup_adapter(a);
-			for (i = 1; i < (tasks - 1); i++) {
-				diva_os_free(0, adapter_list[i]);
-			}
-			return (-1);
-		}
-		memset(diva_current->xdi_adapter.e_tbl, 0x00,
-		       diva_current->xdi_adapter.e_max * sizeof(E_INFO));
-
-		if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.isr_spin_lock, "isr")) {
-			diva_4bri_cleanup_slave_adapters(a);
-			diva_4bri_cleanup_adapter(a);
-			for (i = 1; i < (tasks - 1); i++) {
-				diva_os_free(0, adapter_list[i]);
-			}
-			return (-1);
-		}
-		if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.data_spin_lock, "data")) {
-			diva_4bri_cleanup_slave_adapters(a);
-			diva_4bri_cleanup_adapter(a);
-			for (i = 1; i < (tasks - 1); i++) {
-				diva_os_free(0, adapter_list[i]);
-			}
-			return (-1);
-		}
-
-		strcpy(diva_current->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivas4brid");
-
-		if (diva_os_initialize_soft_isr(&diva_current->xdi_adapter.req_soft_isr, DIDpcRoutine,
-						&diva_current->xdi_adapter)) {
-			diva_4bri_cleanup_slave_adapters(a);
-			diva_4bri_cleanup_adapter(a);
-			for (i = 1; i < (tasks - 1); i++) {
-				diva_os_free(0, adapter_list[i]);
-			}
-			return (-1);
-		}
-
-		/*
-		  Do not initialize second DPC - only one thread will be created
-		*/
-		diva_current->xdi_adapter.isr_soft_isr.object =
-			diva_current->xdi_adapter.req_soft_isr.object;
-	}
-
-	if (v2) {
-		prepare_qBri2_functions(&a->xdi_adapter);
-	} else {
-		prepare_qBri_functions(&a->xdi_adapter);
-	}
-
-	for (i = 0; i < tasks; i++) {
-		diva_current = adapter_list[i];
-		if (i)
-			memcpy(&diva_current->resources, &a->resources, sizeof(divas_card_resources_t));
-		diva_current->resources.pci.qoffset = (a->xdi_adapter.MemorySize >> factor);
-	}
-
-	/*
-	  Set up hardware related pointers
-	*/
-	a->xdi_adapter.cfg = (void *) (unsigned long) a->resources.pci.bar[0];	/* BAR0 CONFIG */
-	a->xdi_adapter.port = (void *) (unsigned long) a->resources.pci.bar[1];	/* BAR1        */
-	a->xdi_adapter.ctlReg = (void *) (unsigned long) a->resources.pci.bar[3];	/* BAR3 CNTRL  */
-
-	for (i = 0; i < tasks; i++) {
-		diva_current = adapter_list[i];
-		diva_4bri_set_addresses(diva_current);
-		Slave = a->xdi_adapter.QuadroList->QuadroAdapter[i];
-		Slave->MultiMaster = &a->xdi_adapter;
-		Slave->sdram_bar = a->xdi_adapter.sdram_bar;
-		if (i) {
-			Slave->serialNo = ((dword) (Slave->ControllerNumber << 24)) |
-				a->xdi_adapter.serialNo;
-			Slave->cardType = a->xdi_adapter.cardType;
-		}
-	}
-
-	/*
-	  reset contains the base address for the PLX 9054 register set
-	*/
-	p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
-	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
-	DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-
-	/*
-	  Set IRQ handler
-	*/
-	a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
-	sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA 4BRI %ld",
-		(long) a->xdi_adapter.serialNo);
-
-	if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
-				 a->xdi_adapter.irq_info.irq_name)) {
-		diva_4bri_cleanup_slave_adapters(a);
-		diva_4bri_cleanup_adapter(a);
-		for (i = 1; i < (tasks - 1); i++) {
-			diva_os_free(0, adapter_list[i]);
-		}
-		return (-1);
-	}
-
-	a->xdi_adapter.irq_info.registered = 1;
-
-	/*
-	  Add three slave adapters
-	*/
-	if (tasks > 1) {
-		diva_add_slave_adapter(adapter_list[1]);
-		diva_add_slave_adapter(adapter_list[2]);
-		diva_add_slave_adapter(adapter_list[3]);
-	}
-
-	diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
-		      a->resources.pci.irq, a->xdi_adapter.serialNo);
-
-	return (0);
-}
-
-/*
-**  The cleanup function will be called for the master adapter only;
-**  this is guaranteed by design: the cleanup callback is set
-**  by the master adapter only
-*/
-static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
-	int bar;
-
-	/*
-	  Stop adapter if running
-	*/
-	if (a->xdi_adapter.Initialized) {
-		diva_4bri_stop_adapter(a);
-	}
-
-	/*
-	  Remove IRQ handler
-	*/
-	if (a->xdi_adapter.irq_info.registered) {
-		diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
-	}
-	a->xdi_adapter.irq_info.registered = 0;
-
-	/*
-	  Free DPC's and spin locks on all adapters
-	*/
-	diva_4bri_cleanup_slave_adapters(a);
-
-	/*
-	  Unmap all BARS
-	*/
-	for (bar = 0; bar < 4; bar++) {
-		if (bar != 1) {
-			if (a->resources.pci.bar[bar]
-			    && a->resources.pci.addr[bar]) {
-				divasa_unmap_pci_bar(a->resources.pci.addr[bar]);
-				a->resources.pci.bar[bar] = 0;
-				a->resources.pci.addr[bar] = NULL;
-			}
-		}
-	}
-
-	/*
-	  Unregister I/O
-	*/
-	if (a->resources.pci.bar[1] && a->resources.pci.addr[1]) {
-		diva_os_register_io_port(a, 0, a->resources.pci.bar[1],
-					 _4bri_is_rev_2_card(a->
-							     CardOrdinal) ?
-					 _4bri_v2_bar_length[1] :
-					 _4bri_bar_length[1],
-					 &a->port_name[0], 1);
-		a->resources.pci.bar[1] = 0;
-		a->resources.pci.addr[1] = NULL;
-	}
-
-	if (a->slave_list) {
-		diva_os_free(0, a->slave_list);
-		a->slave_list = NULL;
-	}
-
-	return (0);
-}
-
-static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
-	dword data[64];
-	dword serNo;
-	word addr, status, i, j;
-	byte Bus, Slot;
-	void *hdev;
-
-	Bus = a->resources.pci.bus;
-	Slot = a->resources.pci.func;
-	hdev = a->resources.pci.hdev;
-
-	for (i = 0; i < 64; ++i) {
-		addr = i * 4;
-		for (j = 0; j < 5; ++j) {
-			PCIwrite(Bus, Slot, 0x4E, &addr, sizeof(addr),
-				 hdev);
-			diva_os_wait(1);
-			PCIread(Bus, Slot, 0x4E, &status, sizeof(status),
-				hdev);
-			if (status & 0x8000)
-				break;
-		}
-		if (j >= 5) {
-			DBG_ERR(("EEPROM[%d] read failed (0x%x)", i * 4, addr))
-				return (0);
-		}
-		PCIread(Bus, Slot, 0x50, &data[i], sizeof(data[i]), hdev);
-	}
-	DBG_BLK(((char *) &data[0], sizeof(data)))
-
-		serNo = data[32];
-	if (serNo == 0 || serNo == 0xffffffff)
-		serNo = data[63];
-
-	if (!serNo) {
-		DBG_LOG(("W: Serial Number == 0, create one serial number"));
-		serNo = a->resources.pci.bar[1] & 0xffff0000;
-		serNo |= a->resources.pci.bus << 8;
-		serNo |= a->resources.pci.func;
-	}
-
-	a->xdi_adapter.serialNo = serNo;
-
-	DBG_REG(("Serial No.          : %ld", a->xdi_adapter.serialNo))
-
-		return (serNo);
-}
-
-/*
-**  Release resources of slave adapters
-*/
-static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a)
-{
-	diva_os_xdi_adapter_t *adapter_list[4];
-	diva_os_xdi_adapter_t *diva_current;
-	int i;
-
-	adapter_list[0] = a;
-	adapter_list[1] = a->slave_adapters[0];
-	adapter_list[2] = a->slave_adapters[1];
-	adapter_list[3] = a->slave_adapters[2];
-
-	for (i = 0; i < a->xdi_adapter.tasks; i++) {
-		diva_current = adapter_list[i];
-		if (diva_current) {
-			diva_os_destroy_spin_lock(&diva_current->
-						  xdi_adapter.
-						  isr_spin_lock, "unload");
-			diva_os_destroy_spin_lock(&diva_current->
-						  xdi_adapter.
-						  data_spin_lock,
-						  "unload");
-
-			diva_os_cancel_soft_isr(&diva_current->xdi_adapter.
-						req_soft_isr);
-			diva_os_cancel_soft_isr(&diva_current->xdi_adapter.
-						isr_soft_isr);
-
-			diva_os_remove_soft_isr(&diva_current->xdi_adapter.
-						req_soft_isr);
-			diva_current->xdi_adapter.isr_soft_isr.object = NULL;
-
-			if (diva_current->xdi_adapter.e_tbl) {
-				diva_os_free(0,
-					     diva_current->xdi_adapter.
-					     e_tbl);
-			}
-			diva_current->xdi_adapter.e_tbl = NULL;
-			diva_current->xdi_adapter.e_max = 0;
-			diva_current->xdi_adapter.e_count = 0;
-		}
-	}
-
-	return (0);
-}
-
-static int
-diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-			diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
-	int ret = -1;
-
-	if (cmd->adapter != a->controller) {
-		DBG_ERR(("A: 4bri_cmd, invalid controller=%d != %d",
-			 cmd->adapter, a->controller))
-			return (-1);
-	}
-
-	switch (cmd->command) {
-	case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->CardOrdinal;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->xdi_adapter.serialNo;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
-		if (!a->xdi_adapter.ControllerNumber) {
-			/*
-			  Only master adapter can access hardware config
-			*/
-			a->xdi_mbox.data_length = sizeof(dword) * 9;
-			a->xdi_mbox.data =
-				diva_os_malloc(0, a->xdi_mbox.data_length);
-			if (a->xdi_mbox.data) {
-				int i;
-				dword *data = (dword *) a->xdi_mbox.data;
-
-				for (i = 0; i < 8; i++) {
-					*data++ = a->resources.pci.bar[i];
-				}
-				*data++ = (dword) a->resources.pci.irq;
-				a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-				ret = 0;
-			}
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_CARD_STATE:
-		if (!a->xdi_adapter.ControllerNumber) {
-			a->xdi_mbox.data_length = sizeof(dword);
-			a->xdi_mbox.data =
-				diva_os_malloc(0, a->xdi_mbox.data_length);
-			if (a->xdi_mbox.data) {
-				dword *data = (dword *) a->xdi_mbox.data;
-				if (!a->xdi_adapter.ram
-				    || !a->xdi_adapter.reset
-				    || !a->xdi_adapter.cfg) {
-					*data = 3;
-				} else if (a->xdi_adapter.trapped) {
-					*data = 2;
-				} else if (a->xdi_adapter.Initialized) {
-					*data = 1;
-				} else {
-					*data = 0;
-				}
-				a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-				ret = 0;
-			}
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_WRITE_FPGA:
-		if (!a->xdi_adapter.ControllerNumber) {
-			ret =
-				diva_4bri_write_fpga_image(a,
-							   (byte *)&cmd[1],
-							   cmd->command_data.
-							   write_fpga.
-							   image_length);
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_RESET_ADAPTER:
-		if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_reset_adapter(&a->xdi_adapter);
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
-		if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_write_sdram_block(&a->xdi_adapter,
-							  cmd->
-							  command_data.
-							  write_sdram.
-							  offset,
-							  (byte *) &
-							  cmd[1],
-							  cmd->
-							  command_data.
-							  write_sdram.
-							  length,
-							  a->xdi_adapter.
-							  MemorySize);
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_START_ADAPTER:
-		if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_start_adapter(&a->xdi_adapter,
-						      cmd->command_data.
-						      start.offset,
-						      cmd->command_data.
-						      start.features);
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
-		if (!a->xdi_adapter.ControllerNumber) {
-			a->xdi_adapter.features =
-				cmd->command_data.features.features;
-			a->xdi_adapter.a.protocol_capabilities =
-				a->xdi_adapter.features;
-			DBG_TRC(("Set raw protocol features (%08x)",
-				 a->xdi_adapter.features))
-				ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_STOP_ADAPTER:
-		if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_stop_adapter(a);
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
-		ret = diva_card_read_xlog(a);
-		break;
-
-	case DIVA_XDI_UM_CMD_READ_SDRAM:
-		if (!a->xdi_adapter.ControllerNumber
-		    && a->xdi_adapter.Address) {
-			if (
-				(a->xdi_mbox.data_length =
-				 cmd->command_data.read_sdram.length)) {
-				if (
-					(a->xdi_mbox.data_length +
-					 cmd->command_data.read_sdram.offset) <
-					a->xdi_adapter.MemorySize) {
-					a->xdi_mbox.data =
-						diva_os_malloc(0,
-							       a->xdi_mbox.
-							       data_length);
-					if (a->xdi_mbox.data) {
-						byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter);
-						byte __iomem *src = p;
-						byte *dst = a->xdi_mbox.data;
-						dword len = a->xdi_mbox.data_length;
-
-						src += cmd->command_data.read_sdram.offset;
-
-						while (len--) {
-							*dst++ = READ_BYTE(src++);
-						}
-						DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p);
-						a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-						ret = 0;
-					}
-				}
-			}
-		}
-		break;
-
-	default:
-		DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller,
-			 cmd->command))
-			}
-
-	return (ret);
-}
-
-void *xdiLoadFile(char *FileName, dword *FileLength,
-		  unsigned long lim)
-{
-	void *ret = diva_xdiLoadFileFile;
-
-	if (FileLength) {
-		*FileLength = diva_xdiLoadFileLength;
-	}
-	diva_xdiLoadFileFile = NULL;
-	diva_xdiLoadFileLength = 0;
-
-	return (ret);
-}
-
-void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-static int
-diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a, byte *data,
-			   dword length)
-{
-	int ret;
-
-	diva_xdiLoadFileFile = data;
-	diva_xdiLoadFileLength = length;
-
-	ret = qBri_FPGA_download(&a->xdi_adapter);
-
-	diva_xdiLoadFileFile = NULL;
-	diva_xdiLoadFileLength = 0;
-
-	return (ret ? 0 : -1);
-}
-
-static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
-	PISDN_ADAPTER Slave;
-	int i;
-
-	if (!IoAdapter->Address || !IoAdapter->reset) {
-		return (-1);
-	}
-	if (IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't reset 4BRI adapter - please stop first",
-			 IoAdapter->ANum))
-			return (-1);
-	}
-
-	/*
-	  Forget all entities on all adapters
-	*/
-	for (i = 0; ((i < IoAdapter->tasks) && IoAdapter->QuadroList); i++) {
-		Slave = IoAdapter->QuadroList->QuadroAdapter[i];
-		Slave->e_count = 0;
-		if (Slave->e_tbl) {
-			memset(Slave->e_tbl, 0x00,
-			       Slave->e_max * sizeof(E_INFO));
-		}
-		Slave->head = 0;
-		Slave->tail = 0;
-		Slave->assign = 0;
-		Slave->trapped = 0;
-
-		memset(&Slave->a.IdTable[0], 0x00,
-		       sizeof(Slave->a.IdTable));
-		memset(&Slave->a.IdTypeTable[0], 0x00,
-		       sizeof(Slave->a.IdTypeTable));
-		memset(&Slave->a.FlowControlIdTable[0], 0x00,
-		       sizeof(Slave->a.FlowControlIdTable));
-		memset(&Slave->a.FlowControlSkipTable[0], 0x00,
-		       sizeof(Slave->a.FlowControlSkipTable));
-		memset(&Slave->a.misc_flags_table[0], 0x00,
-		       sizeof(Slave->a.misc_flags_table));
-		memset(&Slave->a.rx_stream[0], 0x00,
-		       sizeof(Slave->a.rx_stream));
-		memset(&Slave->a.tx_stream[0], 0x00,
-		       sizeof(Slave->a.tx_stream));
-		memset(&Slave->a.tx_pos[0], 0x00, sizeof(Slave->a.tx_pos));
-		memset(&Slave->a.rx_pos[0], 0x00, sizeof(Slave->a.rx_pos));
-	}
-
-	return (0);
-}
-
-
-static int
-diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
-			    dword address,
-			    const byte *data, dword length, dword limit)
-{
-	byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	byte __iomem *mem = p;
-
-	if (((address + length) >= limit) || !mem) {
-		DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
-		DBG_ERR(("A: A(%d) write 4BRI address=0x%08lx",
-			 IoAdapter->ANum, address + length))
-			return (-1);
-	}
-	mem += address;
-
-	while (length--) {
-		WRITE_BYTE(mem++, *data++);
-	}
-
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
-	return (0);
-}
-
-static int
-diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
-			dword start_address, dword features)
-{
-	volatile word __iomem *signature;
-	int started = 0;
-	int i;
-	byte __iomem *p;
-
-	/*
-	  start adapter
-	*/
-	start_qBri_hardware(IoAdapter);
-
-	p = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
-	/*
-	  wait for signature in shared memory (max. 3 seconds)
-	*/
-	signature = (volatile word __iomem *) (&p[0x1E]);
-
-	for (i = 0; i < 300; ++i) {
-		diva_os_wait(10);
-		if (READ_WORD(&signature[0]) == 0x4447) {
-			DBG_TRC(("Protocol startup time %d.%02d seconds",
-				 (i / 100), (i % 100)))
-				started = 1;
-			break;
-		}
-	}
-
-	for (i = 1; i < IoAdapter->tasks; i++) {
-		IoAdapter->QuadroList->QuadroAdapter[i]->features =
-			IoAdapter->features;
-		IoAdapter->QuadroList->QuadroAdapter[i]->a.
-			protocol_capabilities = IoAdapter->features;
-	}
-
-	if (!started) {
-		DBG_FTL(("%s: Adapter selftest failed, signature=%04x",
-			 IoAdapter->Properties.Name,
-			 READ_WORD(&signature[0])))
-			DIVA_OS_MEM_DETACH_RAM(IoAdapter, p);
-		(*(IoAdapter->trapFnc)) (IoAdapter);
-		IoAdapter->stop(IoAdapter);
-		return (-1);
-	}
-	DIVA_OS_MEM_DETACH_RAM(IoAdapter, p);
-
-	for (i = 0; i < IoAdapter->tasks; i++) {
-		IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 1;
-		IoAdapter->QuadroList->QuadroAdapter[i]->IrqCount = 0;
-	}
-
-	if (check_qBri_interrupt(IoAdapter)) {
-		DBG_ERR(("A: A(%d) interrupt test failed",
-			 IoAdapter->ANum))
-			for (i = 0; i < IoAdapter->tasks; i++) {
-				IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0;
-			}
-		IoAdapter->stop(IoAdapter);
-		return (-1);
-	}
-
-	IoAdapter->Properties.Features = (word) features;
-	diva_xdi_display_adapter_features(IoAdapter->ANum);
-
-	for (i = 0; i < IoAdapter->tasks; i++) {
-		DBG_LOG(("A(%d) %s adapter successfully started",
-			 IoAdapter->QuadroList->QuadroAdapter[i]->ANum,
-			 (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI"))
-			diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
-		IoAdapter->QuadroList->QuadroAdapter[i]->Properties.Features = (word) features;
-	}
-
-	return (0);
-}
-
-static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter)
-{
-#ifdef	SUPPORT_INTERRUPT_TEST_ON_4BRI
-	int i;
-	ADAPTER *a = &IoAdapter->a;
-	byte __iomem *p;
-
-	IoAdapter->IrqCount = 0;
-
-	if (IoAdapter->ControllerNumber > 0)
-		return (-1);
-
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE);
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-	/*
-	  interrupt test
-	*/
-	a->ReadyInt = 1;
-	a->ram_out(a, &PR_RAM->ReadyInt, 1);
-
-	for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10));
-
-	return ((IoAdapter->IrqCount > 0) ? 0 : -1);
-#else
-	dword volatile __iomem *qBriIrq;
-	byte __iomem *p;
-	/*
-	  Reset on-board interrupt register
-	*/
-	IoAdapter->IrqCount = 0;
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriIrq = (dword volatile __iomem *) (&p[_4bri_is_rev_2_card
-						 (IoAdapter->
-						  cardType) ? (MQ2_BREG_IRQ_TEST)
-						 : (MQ_BREG_IRQ_TEST)]);
-
-	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE);
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
-	diva_os_wait(100);
-
-	return (0);
-#endif				/* SUPPORT_INTERRUPT_TEST_ON_4BRI */
-}
-
-static void diva_4bri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
-	/*
-	  clear any pending interrupt
-	*/
-	IoAdapter->disIrq(IoAdapter);
-
-	IoAdapter->tst_irq(&IoAdapter->a);
-	IoAdapter->clr_irq(&IoAdapter->a);
-	IoAdapter->tst_irq(&IoAdapter->a);
-
-	/*
-	  kill pending dpcs
-	*/
-	diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
-	diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-	int i;
-
-	if (!IoAdapter->ram) {
-		return (-1);
-	}
-
-	if (!IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't stop 4BRI adapter - not running",
-			 IoAdapter->ANum))
-			return (-1);	/* nothing to stop */
-	}
-
-	for (i = 0; i < IoAdapter->tasks; i++) {
-		IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0;
-	}
-
-	/*
-	  Disconnect Adapters from DIDD
-	*/
-	for (i = 0; i < IoAdapter->tasks; i++) {
-		diva_xdi_didd_remove_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
-	}
-
-	i = 100;
-
-	/*
-	  Stop interrupts
-	*/
-	a->clear_interrupts_proc = diva_4bri_clear_interrupts;
-	IoAdapter->a.ReadyInt = 1;
-	IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
-	do {
-		diva_os_sleep(10);
-	} while (i-- && a->clear_interrupts_proc);
-
-	if (a->clear_interrupts_proc) {
-		diva_4bri_clear_interrupts(a);
-		a->clear_interrupts_proc = NULL;
-		DBG_ERR(("A: A(%d) no final interrupt from 4BRI adapter",
-			 IoAdapter->ANum))
-			}
-	IoAdapter->a.ReadyInt = 0;
-
-	/*
-	  Stop and reset adapter
-	*/
-	IoAdapter->stop(IoAdapter);
-
-	return (0);
-}
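
The start paths in this file and in the BRI/PRI files below share one pattern: after kicking the hardware they poll a signature word in shared memory every 10 ms, for at most 300 iterations (3 seconds), and treat a timeout as a failed selftest. The following is a minimal, standalone sketch of that pattern; the callback, the no-op delay, and main() are illustrative stand-ins (for the shared-RAM read and diva_os_wait()), not driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef bool (*sig_check_t)(void *ctx);

static void wait_10ms(void)
{
	/* stand-in for diva_os_wait(10); a real build would sleep here */
}

/* Poll up to 300 times (10 ms apart) and report how many ticks it took. */
static int wait_for_signature(sig_check_t check, void *ctx, unsigned int *ticks)
{
	unsigned int i;

	for (i = 0; i < 300; i++) {
		wait_10ms();
		if (check(ctx)) {
			*ticks = i;
			return 0;	/* protocol code signed on */
		}
	}
	return -1;			/* timeout: treated as a selftest failure */
}

/* Example check: the driver instead reads the word at offset 0x1E of shared RAM. */
static bool saw_0x4447(void *ctx)
{
	return *(const uint16_t *)ctx == 0x4447;
}

int main(void)
{
	uint16_t signature = 0x4447;	/* pretend the card already came up */
	unsigned int ticks;

	if (wait_for_signature(saw_0x4447, &signature, &ticks) == 0)
		printf("startup time %u.%02u seconds\n", ticks / 100, ticks % 100);
	return 0;
}

The elapsed-tick reporting (i / 100, i % 100) mirrors the "Protocol startup time" trace messages in the removed code.
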
diff --git a/drivers/isdn/hardware/eicon/os_4bri.h b/drivers/isdn/hardware/eicon/os_4bri.h
deleted file mode 100644
index 94b2709..0000000
--- a/drivers/isdn/hardware/eicon/os_4bri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_4bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_4_BRI_H__
-#define __DIVA_OS_4_BRI_H__
-
-int diva_4bri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
deleted file mode 100644
index de93090..0000000
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ /dev/null
@@ -1,815 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_bri.c,v 1.21 2004/03/21 17:26:01 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_bri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "dsrv_bri.h"
-
-/*
-**  IMPORTS
-*/
-extern void prepare_maestra_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-**  LOCALS
-*/
-static int bri_bar_length[3] = {
-	0x80,
-	0x80,
-	0x20
-};
-static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-				  diva_xdi_um_cfg_cmd_t *cmd, int length);
-static int diva_bri_reregister_io(diva_os_xdi_adapter_t *a);
-static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter);
-static int diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
-				      dword address,
-				      const byte *data, dword length);
-static int diva_bri_start_adapter(PISDN_ADAPTER IoAdapter,
-				  dword start_address, dword features);
-static int diva_bri_stop_adapter(diva_os_xdi_adapter_t *a);
-
-static void diva_bri_set_addresses(diva_os_xdi_adapter_t *a)
-{
-	a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 0;
-	a->resources.pci.mem_type_id[MEM_TYPE_CFG] = 1;
-	a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 1;
-	a->resources.pci.mem_type_id[MEM_TYPE_PORT] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 2;
-
-	a->xdi_adapter.ram = a->resources.pci.addr[0];
-	a->xdi_adapter.cfg = a->resources.pci.addr[1];
-	a->xdi_adapter.Address = a->resources.pci.addr[2];
-
-	a->xdi_adapter.reset = a->xdi_adapter.cfg;
-	a->xdi_adapter.port = a->xdi_adapter.Address;
-
-	a->xdi_adapter.ctlReg = a->xdi_adapter.port + M_PCI_RESET;
-
-	a->xdi_adapter.reset += 0x4C;	/* PLX 9050 !! */
-}
-
-/*
-**  BAR0 - MEM Addr  - 0x80  - NOT USED
-**  BAR1 - I/O Addr  - 0x80
-**  BAR2 - I/O Addr  - 0x20
-*/
-int diva_bri_init_card(diva_os_xdi_adapter_t *a)
-{
-	int bar;
-	dword bar2 = 0, bar2_length = 0xffffffff;
-	word cmd = 0, cmd_org;
-	byte Bus, Slot;
-	void *hdev;
-	byte __iomem *p;
-
-	/*
-	  Set properties
-	*/
-	a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
-	DBG_LOG(("Load %s", a->xdi_adapter.Properties.Name))
-
-		/*
-		  Get resources
-		*/
-		for (bar = 0; bar < 3; bar++) {
-			a->resources.pci.bar[bar] =
-				divasa_get_pci_bar(a->resources.pci.bus,
-						   a->resources.pci.func, bar,
-						   a->resources.pci.hdev);
-			if (!a->resources.pci.bar[bar]) {
-				DBG_ERR(("A: can't get BAR[%d]", bar))
-					return (-1);
-			}
-		}
-
-	a->resources.pci.irq =
-		(byte) divasa_get_pci_irq(a->resources.pci.bus,
-					  a->resources.pci.func,
-					  a->resources.pci.hdev);
-	if (!a->resources.pci.irq) {
-		DBG_ERR(("A: invalid irq"));
-		return (-1);
-	}
-
-	/*
-	  Get the length of I/O BAR 2 - it differs depending on the
-	  EEPROM version
-	*/
-	Bus = a->resources.pci.bus;
-	Slot = a->resources.pci.func;
-	hdev = a->resources.pci.hdev;
-
-	/*
-	  Get the original values of the BAR2 and CMD registers
-	*/
-	PCIread(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev);
-	PCIread(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-	/*
-	  Disable device and get BAR2 length
-	*/
-	PCIwrite(Bus, Slot, 0x04, &cmd, sizeof(cmd), hdev);
-	PCIwrite(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev);
-	PCIread(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev);
-	/*
-	  Restore BAR2 and CMD registers
-	*/
-	PCIwrite(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev);
-	PCIwrite(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-
-	/*
-	  Calculate BAR2 length
-	*/
-	bar2_length = (~(bar2_length & ~7)) + 1;
-	DBG_LOG(("BAR[2] length=%lx", bar2_length))
-
-		/*
-		  Map and register resources
-		*/
-		if (!(a->resources.pci.addr[0] =
-		      divasa_remap_pci_bar(a, 0, a->resources.pci.bar[0],
-					   bri_bar_length[0]))) {
-			DBG_ERR(("A: BRI, can't map BAR[0]"))
-				diva_bri_cleanup_adapter(a);
-			return (-1);
-		}
-
-	sprintf(&a->port_name[0], "BRI %02x:%02x",
-		a->resources.pci.bus, a->resources.pci.func);
-
-	if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1],
-				     bri_bar_length[1], &a->port_name[0], 1)) {
-		DBG_ERR(("A: BRI, can't register BAR[1]"))
-			diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	a->resources.pci.addr[1] = (void *) (unsigned long) a->resources.pci.bar[1];
-	a->resources.pci.length[1] = bri_bar_length[1];
-
-	if (diva_os_register_io_port(a, 1, a->resources.pci.bar[2],
-				     bar2_length, &a->port_name[0], 2)) {
-		DBG_ERR(("A: BRI, can't register BAR[2]"))
-			diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	a->resources.pci.addr[2] = (void *) (unsigned long) a->resources.pci.bar[2];
-	a->resources.pci.length[2] = bar2_length;
-
-	/*
-	  Set all memory areas
-	*/
-	diva_bri_set_addresses(a);
-
-	/*
-	  Get Serial Number
-	*/
-	a->xdi_adapter.serialNo = diva_bri_get_serial_number(a);
-
-	/*
-	  Register I/O ports with correct name now
-	*/
-	if (diva_bri_reregister_io(a)) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-
-	/*
-	  Initialize OS dependent objects
-	*/
-	if (diva_os_initialize_spin_lock
-	    (&a->xdi_adapter.isr_spin_lock, "isr")) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	if (diva_os_initialize_spin_lock
-	    (&a->xdi_adapter.data_spin_lock, "data")) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-
-	strcpy(a->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivasbrid");
-
-	if (diva_os_initialize_soft_isr(&a->xdi_adapter.req_soft_isr,
-					DIDpcRoutine, &a->xdi_adapter)) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	/*
-	  Do not initialize second DPC - only one thread will be created
-	*/
-	a->xdi_adapter.isr_soft_isr.object = a->xdi_adapter.req_soft_isr.object;
-
-	/*
-	  Create entity table
-	*/
-	a->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels;
-	a->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info;
-	a->xdi_adapter.e_tbl = diva_os_malloc(0, a->xdi_adapter.e_max * sizeof(E_INFO));
-	if (!a->xdi_adapter.e_tbl) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	memset(a->xdi_adapter.e_tbl, 0x00, a->xdi_adapter.e_max * sizeof(E_INFO));
-
-	/*
-	  Set up interface
-	*/
-	a->xdi_adapter.a.io = &a->xdi_adapter;
-	a->xdi_adapter.DIRequest = request;
-	a->interface.cleanup_adapter_proc = diva_bri_cleanup_adapter;
-	a->interface.cmd_proc = diva_bri_cmd_card_proc;
-
-	p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
-	outpp(p, 0x41);
-	DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-
-	prepare_maestra_functions(&a->xdi_adapter);
-
-	a->dsp_mask = 0x00000003;
-
-	/*
-	  Set IRQ handler
-	*/
-	a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
-	sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA BRI %ld",
-		(long) a->xdi_adapter.serialNo);
-	if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
-				 a->xdi_adapter.irq_info.irq_name)) {
-		diva_bri_cleanup_adapter(a);
-		return (-1);
-	}
-	a->xdi_adapter.irq_info.registered = 1;
-
-	diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
-		      a->resources.pci.irq, a->xdi_adapter.serialNo);
-
-	return (0);
-}
-
-
-static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
-	int i;
-
-	if (a->xdi_adapter.Initialized) {
-		diva_bri_stop_adapter(a);
-	}
-
-	/*
-	  Remove ISR Handler
-	*/
-	if (a->xdi_adapter.irq_info.registered) {
-		diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
-	}
-	a->xdi_adapter.irq_info.registered = 0;
-
-	if (a->resources.pci.addr[0] && a->resources.pci.bar[0]) {
-		divasa_unmap_pci_bar(a->resources.pci.addr[0]);
-		a->resources.pci.addr[0] = NULL;
-		a->resources.pci.bar[0] = 0;
-	}
-
-	for (i = 1; i < 3; i++) {
-		if (a->resources.pci.addr[i] && a->resources.pci.bar[i]) {
-			diva_os_register_io_port(a, 0,
-						 a->resources.pci.bar[i],
-						 a->resources.pci.
-						 length[i],
-						 &a->port_name[0], i);
-			a->resources.pci.addr[i] = NULL;
-			a->resources.pci.bar[i] = 0;
-		}
-	}
-
-	/*
-	  Free OS objects
-	*/
-	diva_os_cancel_soft_isr(&a->xdi_adapter.req_soft_isr);
-	diva_os_cancel_soft_isr(&a->xdi_adapter.isr_soft_isr);
-
-	diva_os_remove_soft_isr(&a->xdi_adapter.req_soft_isr);
-	a->xdi_adapter.isr_soft_isr.object = NULL;
-
-	diva_os_destroy_spin_lock(&a->xdi_adapter.isr_spin_lock, "rm");
-	diva_os_destroy_spin_lock(&a->xdi_adapter.data_spin_lock, "rm");
-
-	/*
-	  Free memory
-	*/
-	if (a->xdi_adapter.e_tbl) {
-		diva_os_free(0, a->xdi_adapter.e_tbl);
-		a->xdi_adapter.e_tbl = NULL;
-	}
-
-	return (0);
-}
-
-void diva_os_prepare_maestra_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-/*
-**  Get serial number
-*/
-static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
-	dword serNo = 0;
-	byte __iomem *confIO;
-	word serHi, serLo;
-	word __iomem *confMem;
-
-	confIO = DIVA_OS_MEM_ATTACH_CFG(&a->xdi_adapter);
-	serHi = (word) (inppw(&confIO[0x22]) & 0x0FFF);
-	serLo = (word) (inppw(&confIO[0x26]) & 0x0FFF);
-	serNo = ((dword) serHi << 16) | (dword) serLo;
-	DIVA_OS_MEM_DETACH_CFG(&a->xdi_adapter, confIO);
-
-	if ((serNo == 0) || (serNo == 0xFFFFFFFF)) {
-		DBG_FTL(("W: BRI use BAR[0] to get card serial number"))
-
-			confMem = (word __iomem *)DIVA_OS_MEM_ATTACH_RAM(&a->xdi_adapter);
-		serHi = (word) (READ_WORD(&confMem[0x11]) & 0x0FFF);
-		serLo = (word) (READ_WORD(&confMem[0x13]) & 0x0FFF);
-		serNo = (((dword) serHi) << 16) | ((dword) serLo);
-		DIVA_OS_MEM_DETACH_RAM(&a->xdi_adapter, confMem);
-	}
-
-	DBG_LOG(("Serial Number=%ld", serNo))
-
-		return (serNo);
-}
-
-/*
-**  Unregister I/O and register it with new name,
-**  based on Serial Number
-*/
-static int diva_bri_reregister_io(diva_os_xdi_adapter_t *a)
-{
-	int i;
-
-	for (i = 1; i < 3; i++) {
-		diva_os_register_io_port(a, 0, a->resources.pci.bar[i],
-					 a->resources.pci.length[i],
-					 &a->port_name[0], i);
-		a->resources.pci.addr[i] = NULL;
-	}
-
-	sprintf(a->port_name, "DIVA BRI %ld",
-		(long) a->xdi_adapter.serialNo);
-
-	for (i = 1; i < 3; i++) {
-		if (diva_os_register_io_port(a, 1, a->resources.pci.bar[i],
-					     a->resources.pci.length[i],
-					     &a->port_name[0], i)) {
-			DBG_ERR(("A: failed to reregister BAR[%d]", i))
-				return (-1);
-		}
-		a->resources.pci.addr[i] =
-			(void *) (unsigned long) a->resources.pci.bar[i];
-	}
-
-	return (0);
-}
-
-/*
-**  Process command from user mode
-*/
-static int
-diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-		       diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
-	int ret = -1;
-
-	if (cmd->adapter != a->controller) {
-		DBG_ERR(("A: bri_cmd, invalid controller=%d != %d",
-			 cmd->adapter, a->controller))
-			return (-1);
-	}
-
-	switch (cmd->command) {
-	case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->CardOrdinal;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->xdi_adapter.serialNo;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
-		a->xdi_mbox.data_length = sizeof(dword) * 9;
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			int i;
-			dword *data = (dword *) a->xdi_mbox.data;
-
-			for (i = 0; i < 8; i++) {
-				*data++ = a->resources.pci.bar[i];
-			}
-			*data++ = (dword) a->resources.pci.irq;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_CARD_STATE:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			dword *data = (dword *) a->xdi_mbox.data;
-			if (!a->xdi_adapter.port) {
-				*data = 3;
-			} else if (a->xdi_adapter.trapped) {
-				*data = 2;
-			} else if (a->xdi_adapter.Initialized) {
-				*data = 1;
-			} else {
-				*data = 0;
-			}
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_RESET_ADAPTER:
-		ret = diva_bri_reset_adapter(&a->xdi_adapter);
-		break;
-
-	case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
-		ret = diva_bri_write_sdram_block(&a->xdi_adapter,
-						 cmd->command_data.
-						 write_sdram.offset,
-						 (byte *)&cmd[1],
-						 cmd->command_data.
-						 write_sdram.length);
-		break;
-
-	case DIVA_XDI_UM_CMD_START_ADAPTER:
-		ret = diva_bri_start_adapter(&a->xdi_adapter,
-					     cmd->command_data.start.
-					     offset,
-					     cmd->command_data.start.
-					     features);
-		break;
-
-	case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
-		a->xdi_adapter.features =
-			cmd->command_data.features.features;
-		a->xdi_adapter.a.protocol_capabilities =
-			a->xdi_adapter.features;
-		DBG_TRC(
-			("Set raw protocol features (%08x)",
-			 a->xdi_adapter.features)) ret = 0;
-		break;
-
-	case DIVA_XDI_UM_CMD_STOP_ADAPTER:
-		ret = diva_bri_stop_adapter(a);
-		break;
-
-	case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
-		ret = diva_card_read_xlog(a);
-		break;
-
-	default:
-		DBG_ERR(
-			("A: A(%d) invalid cmd=%d", a->controller,
-			 cmd->command))}
-
-	return (ret);
-}
-
-static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
-	byte __iomem *addrHi, *addrLo, *ioaddr;
-	dword i;
-	byte __iomem *Port;
-
-	if (!IoAdapter->port) {
-		return (-1);
-	}
-	if (IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't reset BRI adapter - please stop first",
-			 IoAdapter->ANum)) return (-1);
-	}
-	(*(IoAdapter->rstFnc)) (IoAdapter);
-	diva_os_wait(100);
-	Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
-	addrHi = Port +
-		((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
-	addrLo = Port + ADDR;
-	ioaddr = Port + DATA;
-	/*
-	  recover
-	*/
-	outpp(addrHi, (byte) 0);
-	outppw(addrLo, (word) 0);
-	outppw(ioaddr, (word) 0);
-	/*
-	  clear shared memory
-	*/
-	outpp(addrHi,
-	      (byte) (
-		      (IoAdapter->MemoryBase + IoAdapter->MemorySize -
-		       BRI_SHARED_RAM_SIZE) >> 16));
-	outppw(addrLo, 0);
-	for (i = 0; i < 0x8000; outppw(ioaddr, 0), ++i);
-	diva_os_wait(100);
-
-	/*
-	  clear signature
-	*/
-	outpp(addrHi,
-	      (byte) (
-		      (IoAdapter->MemoryBase + IoAdapter->MemorySize -
-		       BRI_SHARED_RAM_SIZE) >> 16));
-	outppw(addrLo, 0x1e);
-	outpp(ioaddr, 0);
-	outpp(ioaddr, 0);
-
-	outpp(addrHi, (byte) 0);
-	outppw(addrLo, (word) 0);
-	outppw(ioaddr, (word) 0);
-
-	DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
-	/*
-	  Forget all outstanding entities
-	*/
-	IoAdapter->e_count = 0;
-	if (IoAdapter->e_tbl) {
-		memset(IoAdapter->e_tbl, 0x00,
-		       IoAdapter->e_max * sizeof(E_INFO));
-	}
-	IoAdapter->head = 0;
-	IoAdapter->tail = 0;
-	IoAdapter->assign = 0;
-	IoAdapter->trapped = 0;
-
-	memset(&IoAdapter->a.IdTable[0], 0x00,
-	       sizeof(IoAdapter->a.IdTable));
-	memset(&IoAdapter->a.IdTypeTable[0], 0x00,
-	       sizeof(IoAdapter->a.IdTypeTable));
-	memset(&IoAdapter->a.FlowControlIdTable[0], 0x00,
-	       sizeof(IoAdapter->a.FlowControlIdTable));
-	memset(&IoAdapter->a.FlowControlSkipTable[0], 0x00,
-	       sizeof(IoAdapter->a.FlowControlSkipTable));
-	memset(&IoAdapter->a.misc_flags_table[0], 0x00,
-	       sizeof(IoAdapter->a.misc_flags_table));
-	memset(&IoAdapter->a.rx_stream[0], 0x00,
-	       sizeof(IoAdapter->a.rx_stream));
-	memset(&IoAdapter->a.tx_stream[0], 0x00,
-	       sizeof(IoAdapter->a.tx_stream));
-	memset(&IoAdapter->a.tx_pos[0], 0x00, sizeof(IoAdapter->a.tx_pos));
-	memset(&IoAdapter->a.rx_pos[0], 0x00, sizeof(IoAdapter->a.rx_pos));
-
-	return (0);
-}
-
-static int
-diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
-			   dword address, const byte *data, dword length)
-{
-	byte __iomem *addrHi, *addrLo, *ioaddr;
-	byte __iomem *Port;
-
-	if (!IoAdapter->port) {
-		return (-1);
-	}
-
-	Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
-	addrHi = Port +
-		((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
-	addrLo = Port + ADDR;
-	ioaddr = Port + DATA;
-
-	while (length--) {
-		outpp(addrHi, (word) (address >> 16));
-		outppw(addrLo, (word) (address & 0x0000ffff));
-		outpp(ioaddr, *data++);
-		address++;
-	}
-
-	DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-	return (0);
-}
-
-static int
-diva_bri_start_adapter(PISDN_ADAPTER IoAdapter,
-		       dword start_address, dword features)
-{
-	byte __iomem *Port;
-	dword i, test;
-	byte __iomem *addrHi, *addrLo, *ioaddr;
-	int started = 0;
-	ADAPTER *a = &IoAdapter->a;
-
-	if (IoAdapter->Initialized) {
-		DBG_ERR(
-			("A: A(%d) bri_start_adapter, adapter already running",
-			 IoAdapter->ANum)) return (-1);
-	}
-	if (!IoAdapter->port) {
-		DBG_ERR(("A: A(%d) bri_start_adapter, adapter not mapped",
-			 IoAdapter->ANum)) return (-1);
-	}
-
-	sprintf(IoAdapter->Name, "A(%d)", (int) IoAdapter->ANum);
-	DBG_LOG(("A(%d) start BRI", IoAdapter->ANum))
-
-		Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
-	addrHi = Port +
-		((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
-	addrLo = Port + ADDR;
-	ioaddr = Port + DATA;
-
-	outpp(addrHi,
-	      (byte) (
-		      (IoAdapter->MemoryBase + IoAdapter->MemorySize -
-		       BRI_SHARED_RAM_SIZE) >> 16));
-	outppw(addrLo, 0x1e);
-	outppw(ioaddr, 0x00);
-	DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
-	/*
-	  start the protocol code
-	*/
-	Port = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	outpp(Port, 0x08);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, Port);
-
-	Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
-	addrHi = Port +
-		((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
-	addrLo = Port + ADDR;
-	ioaddr = Port + DATA;
-	/*
-	  wait for signature (max. 3 seconds)
-	*/
-	for (i = 0; i < 300; ++i) {
-		diva_os_wait(10);
-		outpp(addrHi,
-		      (byte) (
-			      (IoAdapter->MemoryBase +
-			       IoAdapter->MemorySize -
-			       BRI_SHARED_RAM_SIZE) >> 16));
-		outppw(addrLo, 0x1e);
-		test = (dword) inppw(ioaddr);
-		if (test == 0x4447) {
-			DBG_LOG(
-				("Protocol startup time %d.%02d seconds",
-				 (i / 100), (i % 100)))
-				started = 1;
-			break;
-		}
-	}
-	DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
-	if (!started) {
-		DBG_FTL(("A: A(%d) %s: Adapter selftest failed 0x%04X",
-			 IoAdapter->ANum, IoAdapter->Properties.Name,
-			 test))
-			(*(IoAdapter->trapFnc)) (IoAdapter);
-		return (-1);
-	}
-
-	IoAdapter->Initialized = 1;
-
-	/*
-	  Check Interrupt
-	*/
-	IoAdapter->IrqCount = 0;
-	a->ReadyInt = 1;
-
-	if (IoAdapter->reset) {
-		Port = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-		outpp(Port, 0x41);
-		DIVA_OS_MEM_DETACH_RESET(IoAdapter, Port);
-	}
-
-	a->ram_out(a, &PR_RAM->ReadyInt, 1);
-	for (i = 0; ((!IoAdapter->IrqCount) && (i < 100)); i++) {
-		diva_os_wait(10);
-	}
-	if (!IoAdapter->IrqCount) {
-		DBG_ERR(
-			("A: A(%d) interrupt test failed",
-			 IoAdapter->ANum))
-			IoAdapter->Initialized = 0;
-		IoAdapter->stop(IoAdapter);
-		return (-1);
-	}
-
-	IoAdapter->Properties.Features = (word) features;
-	diva_xdi_display_adapter_features(IoAdapter->ANum);
-	DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum))
-		/*
-		  Register with DIDD
-		*/
-		diva_xdi_didd_register_adapter(IoAdapter->ANum);
-
-	return (0);
-}
-
-static void diva_bri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
-	/*
-	  clear any pending interrupt
-	*/
-	IoAdapter->disIrq(IoAdapter);
-
-	IoAdapter->tst_irq(&IoAdapter->a);
-	IoAdapter->clr_irq(&IoAdapter->a);
-	IoAdapter->tst_irq(&IoAdapter->a);
-
-	/*
-	  kill pending dpcs
-	*/
-	diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
-	diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-/*
-**  Stop card
-*/
-static int diva_bri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-	int i = 100;
-
-	if (!IoAdapter->port) {
-		return (-1);
-	}
-	if (!IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't stop BRI adapter - not running",
-			 IoAdapter->ANum))
-			return (-1);	/* nothing to stop */
-	}
-	IoAdapter->Initialized = 0;
-
-	/*
-	  Disconnect Adapter from DIDD
-	*/
-	diva_xdi_didd_remove_adapter(IoAdapter->ANum);
-
-	/*
-	  Stop interrupts
-	*/
-	a->clear_interrupts_proc = diva_bri_clear_interrupts;
-	IoAdapter->a.ReadyInt = 1;
-	IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
-	do {
-		diva_os_sleep(10);
-	} while (i-- && a->clear_interrupts_proc);
-	if (a->clear_interrupts_proc) {
-		diva_bri_clear_interrupts(a);
-		a->clear_interrupts_proc = NULL;
-		DBG_ERR(("A: A(%d) no final interrupt from BRI adapter",
-			 IoAdapter->ANum))
-			}
-	IoAdapter->a.ReadyInt = 0;
-
-	/*
-	  Stop and reset adapter
-	*/
-	IoAdapter->stop(IoAdapter);
-
-	return (0);
-}
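
diva_bri_init_card() above sizes BAR2 the classic PCI way: it saves the BAR and command registers, disables the device, writes all-ones to the BAR, reads the value back, restores the registers, and turns the read-back into a length with (~(bar2_length & ~7)) + 1. A small standalone illustration of that last step follows; the helper name and the sample read-back value are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Low bits of an I/O BAR carry flags, not address bits; the driver masks
 * them off with ~7 before inverting, exactly as in the expression above. */
static uint32_t io_bar_size(uint32_t readback)
{
	return ~(readback & ~(uint32_t)7) + 1;
}

int main(void)
{
	/* Made-up read-back for a 0x20-byte I/O BAR after writing all-ones. */
	uint32_t readback = 0xffffffe1u;

	printf("BAR[2] length = 0x%x\n", io_bar_size(readback));	/* prints 0x20 */
	return 0;
}
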
diff --git a/drivers/isdn/hardware/eicon/os_bri.h b/drivers/isdn/hardware/eicon/os_bri.h
deleted file mode 100644
index 37c92cc..0000000
--- a/drivers/isdn/hardware/eicon/os_bri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_BRI_REV_1_H__
-#define __DIVA_OS_BRI_REV_1_H__
-
-int diva_bri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/os_capi.h b/drivers/isdn/hardware/eicon/os_capi.h
deleted file mode 100644
index e72394b..0000000
--- a/drivers/isdn/hardware/eicon/os_capi.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: os_capi.h,v 1.7 2003/04/12 21:40:49 schindler Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface OS include files
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef __OS_CAPI_H__
-#define __OS_CAPI_H__
-
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-
-#endif /* __OS_CAPI_H__ */
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
deleted file mode 100644
index b20f1fb..0000000
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ /dev/null
@@ -1,1053 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_pri.c,v 1.32 2004/03/21 17:26:01 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_pri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "dsp_tst.h"
-#include "diva_dma.h"
-#include "dsrv_pri.h"
-
-/* --------------------------------------------------------------------------
-   OS Dependent part of XDI driver for DIVA PRI Adapter
-
-   DSP detection/validation by Anthony Booth (Eicon Networks, www.eicon.com)
-   -------------------------------------------------------------------------- */
-
-#define DIVA_PRI_NO_PCI_BIOS_WORKAROUND 1
-
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-**  IMPORTS
-*/
-extern void prepare_pri_functions(PISDN_ADAPTER IoAdapter);
-extern void prepare_pri2_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-
-static int diva_pri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static int diva_pri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-				  diva_xdi_um_cfg_cmd_t *cmd, int length);
-static int pri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_pri_stop_adapter(diva_os_xdi_adapter_t *a);
-static dword diva_pri_detect_dsps(diva_os_xdi_adapter_t *a);
-
-/*
-**  Check card revision
-*/
-static int pri_is_rev_2_card(int card_ordinal)
-{
-	switch (card_ordinal) {
-	case CARDTYPE_DIVASRV_P_30M_V2_PCI:
-	case CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI:
-		return (1);
-	}
-	return (0);
-}
-
-static void diva_pri_set_addresses(diva_os_xdi_adapter_t *a)
-{
-	a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 0;
-	a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_CONFIG] = 4;
-	a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 0;
-	a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 2;
-	a->resources.pci.mem_type_id[MEM_TYPE_CFG] = 4;
-	a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 3;
-
-	a->xdi_adapter.Address = a->resources.pci.addr[0];
-	a->xdi_adapter.Control = a->resources.pci.addr[2];
-	a->xdi_adapter.Config = a->resources.pci.addr[4];
-
-	a->xdi_adapter.ram = a->resources.pci.addr[0];
-	a->xdi_adapter.ram += MP_SHARED_RAM_OFFSET;
-
-	a->xdi_adapter.reset = a->resources.pci.addr[2];
-	a->xdi_adapter.reset += MP_RESET;
-
-	a->xdi_adapter.cfg = a->resources.pci.addr[4];
-	a->xdi_adapter.cfg += MP_IRQ_RESET;
-
-	a->xdi_adapter.sdram_bar = a->resources.pci.bar[0];
-
-	a->xdi_adapter.prom = a->resources.pci.addr[3];
-}
-
-/*
-**  BAR0 - SDRAM, MP_MEMORY_SIZE, MP2_MEMORY_SIZE by Rev.2
-**  BAR1 - DEVICES,				0x1000
-**  BAR2 - CONTROL (REG), 0x2000
-**  BAR3 - FLASH (REG),		0x8000
-**  BAR4 - CONFIG (CFG),	0x1000
-*/
-int diva_pri_init_card(diva_os_xdi_adapter_t *a)
-{
-	int bar = 0;
-	int pri_rev_2;
-	unsigned long bar_length[5] = {
-		MP_MEMORY_SIZE,
-		0x1000,
-		0x2000,
-		0x8000,
-		0x1000
-	};
-
-	pri_rev_2 = pri_is_rev_2_card(a->CardOrdinal);
-
-	if (pri_rev_2) {
-		bar_length[0] = MP2_MEMORY_SIZE;
-	}
-	/*
-	  Set properties
-	*/
-	a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
-	DBG_LOG(("Load %s", a->xdi_adapter.Properties.Name))
-
-		/*
-		  First initialization step: get and check hardware resources.
-		  Do not map resources and do not access the card at this step
-		*/
-		for (bar = 0; bar < 5; bar++) {
-			a->resources.pci.bar[bar] =
-				divasa_get_pci_bar(a->resources.pci.bus,
-						   a->resources.pci.func, bar,
-						   a->resources.pci.hdev);
-			if (!a->resources.pci.bar[bar]
-			    || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) {
-				DBG_ERR(("A: invalid bar[%d]=%08x", bar,
-					 a->resources.pci.bar[bar]))
-					return (-1);
-			}
-		}
-	a->resources.pci.irq =
-		(byte) divasa_get_pci_irq(a->resources.pci.bus,
-					  a->resources.pci.func,
-					  a->resources.pci.hdev);
-	if (!a->resources.pci.irq) {
-		DBG_ERR(("A: invalid irq"));
-		return (-1);
-	}
-
-	/*
-	  Map all BAR's
-	*/
-	for (bar = 0; bar < 5; bar++) {
-		a->resources.pci.addr[bar] =
-			divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar],
-					     bar_length[bar]);
-		if (!a->resources.pci.addr[bar]) {
-			DBG_ERR(("A: A(%d), can't map bar[%d]",
-				 a->controller, bar))
-				diva_pri_cleanup_adapter(a);
-			return (-1);
-		}
-	}
-
-	/*
-	  Set all memory areas
-	*/
-	diva_pri_set_addresses(a);
-
-	/*
-	  Get Serial Number of this adapter
-	*/
-	if (pri_get_serial_number(a)) {
-		dword serNo;
-		serNo = a->resources.pci.bar[1] & 0xffff0000;
-		serNo |= ((dword) a->resources.pci.bus) << 8;
-		serNo += (a->resources.pci.func + a->controller + 1);
-		a->xdi_adapter.serialNo = serNo & ~0xFF000000;
-		DBG_ERR(("A: A(%d) can't get Serial Number, generated serNo=%ld",
-			 a->controller, a->xdi_adapter.serialNo))
-			}
-
-
-	/*
-	  Initialize os objects
-	*/
-	if (diva_os_initialize_spin_lock(&a->xdi_adapter.isr_spin_lock, "isr")) {
-		diva_pri_cleanup_adapter(a);
-		return (-1);
-	}
-	if (diva_os_initialize_spin_lock
-	    (&a->xdi_adapter.data_spin_lock, "data")) {
-		diva_pri_cleanup_adapter(a);
-		return (-1);
-	}
-
-	strcpy(a->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivasprid");
-
-	if (diva_os_initialize_soft_isr(&a->xdi_adapter.req_soft_isr,
-					DIDpcRoutine, &a->xdi_adapter)) {
-		diva_pri_cleanup_adapter(a);
-		return (-1);
-	}
-
-	/*
-	  Do not initialize second DPC - only one thread will be created
-	*/
-	a->xdi_adapter.isr_soft_isr.object =
-		a->xdi_adapter.req_soft_isr.object;
-
-	/*
-	  Next step of card initialization:
-	  set up all interface pointers
-	*/
-	a->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels;
-	a->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info;
-
-	a->xdi_adapter.e_tbl =
-		diva_os_malloc(0, a->xdi_adapter.e_max * sizeof(E_INFO));
-	if (!a->xdi_adapter.e_tbl) {
-		diva_pri_cleanup_adapter(a);
-		return (-1);
-	}
-	memset(a->xdi_adapter.e_tbl, 0x00, a->xdi_adapter.e_max * sizeof(E_INFO));
-
-	a->xdi_adapter.a.io = &a->xdi_adapter;
-	a->xdi_adapter.DIRequest = request;
-	a->interface.cleanup_adapter_proc = diva_pri_cleanup_adapter;
-	a->interface.cmd_proc = diva_pri_cmd_card_proc;
-
-	if (pri_rev_2) {
-		prepare_pri2_functions(&a->xdi_adapter);
-	} else {
-		prepare_pri_functions(&a->xdi_adapter);
-	}
-
-	a->dsp_mask = diva_pri_detect_dsps(a);
-
-	/*
-	  Allocate DMA map
-	*/
-	if (pri_rev_2) {
-		diva_init_dma_map(a->resources.pci.hdev,
-				  (struct _diva_dma_map_entry **) &a->xdi_adapter.dma_map, 32);
-	}
-
-	/*
-	  Set IRQ handler
-	*/
-	a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
-	sprintf(a->xdi_adapter.irq_info.irq_name,
-		"DIVA PRI %ld", (long) a->xdi_adapter.serialNo);
-
-	if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
-				 a->xdi_adapter.irq_info.irq_name)) {
-		diva_pri_cleanup_adapter(a);
-		return (-1);
-	}
-	a->xdi_adapter.irq_info.registered = 1;
-
-	diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
-		      a->resources.pci.irq, a->xdi_adapter.serialNo);
-
-	return (0);
-}
-
-static int diva_pri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
-	int bar = 0;
-
-	/*
-	  Stop Adapter if adapter is running
-	*/
-	if (a->xdi_adapter.Initialized) {
-		diva_pri_stop_adapter(a);
-	}
-
-	/*
-	  Remove ISR Handler
-	*/
-	if (a->xdi_adapter.irq_info.registered) {
-		diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
-	}
-	a->xdi_adapter.irq_info.registered = 0;
-
-	/*
-	  Step 1: unmap all BAR's, if any was mapped
-	*/
-	for (bar = 0; bar < 5; bar++) {
-		if (a->resources.pci.bar[bar]
-		    && a->resources.pci.addr[bar]) {
-			divasa_unmap_pci_bar(a->resources.pci.addr[bar]);
-			a->resources.pci.bar[bar] = 0;
-			a->resources.pci.addr[bar] = NULL;
-		}
-	}
-
-	/*
-	  Free OS objects
-	*/
-	diva_os_cancel_soft_isr(&a->xdi_adapter.isr_soft_isr);
-	diva_os_cancel_soft_isr(&a->xdi_adapter.req_soft_isr);
-
-	diva_os_remove_soft_isr(&a->xdi_adapter.req_soft_isr);
-	a->xdi_adapter.isr_soft_isr.object = NULL;
-
-	diva_os_destroy_spin_lock(&a->xdi_adapter.isr_spin_lock, "rm");
-	diva_os_destroy_spin_lock(&a->xdi_adapter.data_spin_lock, "rm");
-
-	/*
-	  Free memory occupied by the XDI adapter
-	*/
-	if (a->xdi_adapter.e_tbl) {
-		diva_os_free(0, a->xdi_adapter.e_tbl);
-		a->xdi_adapter.e_tbl = NULL;
-	}
-	a->xdi_adapter.Channels = 0;
-	a->xdi_adapter.e_max = 0;
-
-
-	/*
-	  Free adapter DMA map
-	*/
-	diva_free_dma_map(a->resources.pci.hdev,
-			  (struct _diva_dma_map_entry *) a->xdi_adapter.
-			  dma_map);
-	a->xdi_adapter.dma_map = NULL;
-
-
-	/*
-	  Detach this adapter from debug driver
-	*/
-
-	return (0);
-}
-
-/*
-**  Activate On Board Boot Loader
-*/
-static int diva_pri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
-	dword i;
-	struct mp_load __iomem *boot;
-
-	if (!IoAdapter->Address || !IoAdapter->reset) {
-		return (-1);
-	}
-	if (IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't reset PRI adapter - please stop first",
-			 IoAdapter->ANum))
-			return (-1);
-	}
-
-	boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	WRITE_DWORD(&boot->err, 0);
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
-	IoAdapter->rstFnc(IoAdapter);
-
-	diva_os_wait(10);
-
-	boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	i = READ_DWORD(&boot->live);
-
-	diva_os_wait(10);
-	if (i == READ_DWORD(&boot->live)) {
-		DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-		DBG_ERR(("A: A(%d) CPU on PRI %ld is not alive!",
-			 IoAdapter->ANum, IoAdapter->serialNo))
-			return (-1);
-	}
-	if (READ_DWORD(&boot->err)) {
-		DBG_ERR(("A: A(%d) PRI %ld Board Selftest failed, error=%08lx",
-			 IoAdapter->ANum, IoAdapter->serialNo,
-			 READ_DWORD(&boot->err)))
-			DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-		return (-1);
-	}
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
-	/*
-	  Forget all outstanding entities
-	*/
-	IoAdapter->e_count = 0;
-	if (IoAdapter->e_tbl) {
-		memset(IoAdapter->e_tbl, 0x00,
-		       IoAdapter->e_max * sizeof(E_INFO));
-	}
-	IoAdapter->head = 0;
-	IoAdapter->tail = 0;
-	IoAdapter->assign = 0;
-	IoAdapter->trapped = 0;
-
-	memset(&IoAdapter->a.IdTable[0], 0x00,
-	       sizeof(IoAdapter->a.IdTable));
-	memset(&IoAdapter->a.IdTypeTable[0], 0x00,
-	       sizeof(IoAdapter->a.IdTypeTable));
-	memset(&IoAdapter->a.FlowControlIdTable[0], 0x00,
-	       sizeof(IoAdapter->a.FlowControlIdTable));
-	memset(&IoAdapter->a.FlowControlSkipTable[0], 0x00,
-	       sizeof(IoAdapter->a.FlowControlSkipTable));
-	memset(&IoAdapter->a.misc_flags_table[0], 0x00,
-	       sizeof(IoAdapter->a.misc_flags_table));
-	memset(&IoAdapter->a.rx_stream[0], 0x00,
-	       sizeof(IoAdapter->a.rx_stream));
-	memset(&IoAdapter->a.tx_stream[0], 0x00,
-	       sizeof(IoAdapter->a.tx_stream));
-	memset(&IoAdapter->a.tx_pos[0], 0x00, sizeof(IoAdapter->a.tx_pos));
-	memset(&IoAdapter->a.rx_pos[0], 0x00, sizeof(IoAdapter->a.rx_pos));
-
-	return (0);
-}
-
-static int
-diva_pri_write_sdram_block(PISDN_ADAPTER IoAdapter,
-			   dword address,
-			   const byte *data, dword length, dword limit)
-{
-	byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	byte __iomem *mem = p;
-
-	if (((address + length) >= limit) || !mem) {
-		DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
-		DBG_ERR(("A: A(%d) write PRI address=0x%08lx",
-			 IoAdapter->ANum, address + length))
-			return (-1);
-	}
-	mem += address;
-
-	/* memcpy_toio(), maybe? */
-	while (length--) {
-		WRITE_BYTE(mem++, *data++);
-	}
-
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
-	return (0);
-}
-
-static int
-diva_pri_start_adapter(PISDN_ADAPTER IoAdapter,
-		       dword start_address, dword features)
-{
-	dword i;
-	int started = 0;
-	byte __iomem *p;
-	struct mp_load __iomem *boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	ADAPTER *a = &IoAdapter->a;
-
-	if (IoAdapter->Initialized) {
-		DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-		DBG_ERR(("A: A(%d) pri_start_adapter, adapter already running",
-			 IoAdapter->ANum))
-			return (-1);
-	}
-	if (!boot) {
-		DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-		DBG_ERR(("A: PRI %ld can't start, adapter not mapped",
-			 IoAdapter->serialNo))
-			return (-1);
-	}
-
-	sprintf(IoAdapter->Name, "A(%d)", (int) IoAdapter->ANum);
-	DBG_LOG(("A(%d) start PRI at 0x%08lx", IoAdapter->ANum,
-		 start_address))
-
-		WRITE_DWORD(&boot->addr, start_address);
-	WRITE_DWORD(&boot->cmd, 3);
-
-	for (i = 0; i < 300; ++i) {
-		diva_os_wait(10);
-		if ((READ_DWORD(&boot->signature) >> 16) == 0x4447) {
-			DBG_LOG(("A(%d) Protocol startup time %d.%02d seconds",
-				 IoAdapter->ANum, (i / 100), (i % 100)))
-				started = 1;
-			break;
-		}
-	}
-
-	if (!started) {
-		byte __iomem *p = (byte __iomem *)boot;
-		dword TrapId;
-		dword debug;
-		TrapId = READ_DWORD(&p[0x80]);
-		debug = READ_DWORD(&p[0x1c]);
-		DBG_ERR(("A(%d) Adapter start failed 0x%08lx, TrapId=%08lx, debug=%08lx",
-			 IoAdapter->ANum, READ_DWORD(&boot->signature),
-			 TrapId, debug))
-			DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-		if (IoAdapter->trapFnc) {
-			(*(IoAdapter->trapFnc)) (IoAdapter);
-		}
-		IoAdapter->stop(IoAdapter);
-		return (-1);
-	}
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
-	IoAdapter->Initialized = true;
-
-	/*
-	  Check Interrupt
-	*/
-	IoAdapter->IrqCount = 0;
-	p = DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
-	WRITE_DWORD(p, (dword)~0x03E00000);
-	DIVA_OS_MEM_DETACH_CFG(IoAdapter, p);
-	a->ReadyInt = 1;
-	a->ram_out(a, &PR_RAM->ReadyInt, 1);
-
-	for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10));
-
-	if (!IoAdapter->IrqCount) {
-		DBG_ERR(("A: A(%d) interrupt test failed",
-			 IoAdapter->ANum))
-			IoAdapter->Initialized = false;
-		IoAdapter->stop(IoAdapter);
-		return (-1);
-	}
-
-	IoAdapter->Properties.Features = (word) features;
-
-	diva_xdi_display_adapter_features(IoAdapter->ANum);
-
-	DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum))
-		/*
-		  Register with DIDD
-		*/
-		diva_xdi_didd_register_adapter(IoAdapter->ANum);
-
-	return (0);
-}
-
-static void diva_pri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
-	/*
-	  clear any pending interrupt
-	*/
-	IoAdapter->disIrq(IoAdapter);
-
-	IoAdapter->tst_irq(&IoAdapter->a);
-	IoAdapter->clr_irq(&IoAdapter->a);
-	IoAdapter->tst_irq(&IoAdapter->a);
-
-	/*
-	  kill pending dpcs
-	*/
-	diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
-	diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-/*
-**  Stop Adapter, but do not unmap/unregister - adapter
-**  will be restarted later
-*/
-static int diva_pri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
-	PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-	int i = 100;
-
-	if (!IoAdapter->ram) {
-		return (-1);
-	}
-	if (!IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't stop PRI adapter - not running",
-			 IoAdapter->ANum))
-			return (-1);	/* nothing to stop */
-	}
-	IoAdapter->Initialized = 0;
-
-	/*
-	  Disconnect Adapter from DIDD
-	*/
-	diva_xdi_didd_remove_adapter(IoAdapter->ANum);
-
-	/*
-	  Stop interrupts
-	*/
-	a->clear_interrupts_proc = diva_pri_clear_interrupts;
-	IoAdapter->a.ReadyInt = 1;
-	IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
-	do {
-		diva_os_sleep(10);
-	} while (i-- && a->clear_interrupts_proc);
-
-	if (a->clear_interrupts_proc) {
-		diva_pri_clear_interrupts(a);
-		a->clear_interrupts_proc = NULL;
-		DBG_ERR(("A: A(%d) no final interrupt from PRI adapter",
-			 IoAdapter->ANum))
-			}
-	IoAdapter->a.ReadyInt = 0;
-
-	/*
-	  Stop and reset adapter
-	*/
-	IoAdapter->stop(IoAdapter);
-
-	return (0);
-}
-
-/*
-**  Process commands from the configuration/download framework and from
-**  user mode
-**
-**  return 0 on success
-*/
-static int
-diva_pri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
-		       diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
-	int ret = -1;
-
-	if (cmd->adapter != a->controller) {
-		DBG_ERR(("A: pri_cmd, invalid controller=%d != %d",
-			 cmd->adapter, a->controller))
-			return (-1);
-	}
-
-	switch (cmd->command) {
-	case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->CardOrdinal;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			*(dword *) a->xdi_mbox.data =
-				(dword) a->xdi_adapter.serialNo;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
-		a->xdi_mbox.data_length = sizeof(dword) * 9;
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			int i;
-			dword *data = (dword *) a->xdi_mbox.data;
-
-			for (i = 0; i < 8; i++) {
-				*data++ = a->resources.pci.bar[i];
-			}
-			*data++ = (dword) a->resources.pci.irq;
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_RESET_ADAPTER:
-		ret = diva_pri_reset_adapter(&a->xdi_adapter);
-		break;
-
-	case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
-		ret = diva_pri_write_sdram_block(&a->xdi_adapter,
-						 cmd->command_data.
-						 write_sdram.offset,
-						 (byte *)&cmd[1],
-						 cmd->command_data.
-						 write_sdram.length,
-						 pri_is_rev_2_card(a->
-								   CardOrdinal)
-						 ? MP2_MEMORY_SIZE :
-						 MP_MEMORY_SIZE);
-		break;
-
-	case DIVA_XDI_UM_CMD_STOP_ADAPTER:
-		ret = diva_pri_stop_adapter(a);
-		break;
-
-	case DIVA_XDI_UM_CMD_START_ADAPTER:
-		ret = diva_pri_start_adapter(&a->xdi_adapter,
-					     cmd->command_data.start.
-					     offset,
-					     cmd->command_data.start.
-					     features);
-		break;
-
-	case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
-		a->xdi_adapter.features =
-			cmd->command_data.features.features;
-		a->xdi_adapter.a.protocol_capabilities =
-			a->xdi_adapter.features;
-		DBG_TRC(("Set raw protocol features (%08x)",
-			 a->xdi_adapter.features))
-			ret = 0;
-		break;
-
-	case DIVA_XDI_UM_CMD_GET_CARD_STATE:
-		a->xdi_mbox.data_length = sizeof(dword);
-		a->xdi_mbox.data =
-			diva_os_malloc(0, a->xdi_mbox.data_length);
-		if (a->xdi_mbox.data) {
-			dword *data = (dword *) a->xdi_mbox.data;
-			if (!a->xdi_adapter.ram ||
-			    !a->xdi_adapter.reset ||
-			    !a->xdi_adapter.cfg) {
-				*data = 3;
-			} else if (a->xdi_adapter.trapped) {
-				*data = 2;
-			} else if (a->xdi_adapter.Initialized) {
-				*data = 1;
-			} else {
-				*data = 0;
-			}
-			a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-			ret = 0;
-		}
-		break;
-
-	case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
-		ret = diva_card_read_xlog(a);
-		break;
-
-	case DIVA_XDI_UM_CMD_READ_SDRAM:
-		if (a->xdi_adapter.Address) {
-			if (
-				(a->xdi_mbox.data_length =
-				 cmd->command_data.read_sdram.length)) {
-				if (
-					(a->xdi_mbox.data_length +
-					 cmd->command_data.read_sdram.offset) <
-					a->xdi_adapter.MemorySize) {
-					a->xdi_mbox.data =
-						diva_os_malloc(0,
-							       a->xdi_mbox.
-							       data_length);
-					if (a->xdi_mbox.data) {
-						byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter);
-						byte __iomem *src = p;
-						byte *dst = a->xdi_mbox.data;
-						dword len = a->xdi_mbox.data_length;
-
-						src += cmd->command_data.read_sdram.offset;
-
-						while (len--) {
-							*dst++ = READ_BYTE(src++);
-						}
-						a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-						DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p);
-						ret = 0;
-					}
-				}
-			}
-		}
-		break;
-
-	default:
-		DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller,
-			 cmd->command))
-			}
-
-	return (ret);
-}
-
-/*
-**  Get Serial Number
-*/
-static int pri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
-	byte data[64];
-	int i;
-	dword len = sizeof(data);
-	volatile byte __iomem *config;
-	volatile byte __iomem *flash;
-	byte c;
-
-/*
- *  First set some GT6401x config registers before accessing the BOOT-ROM
- */
-	config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
-	c = READ_BYTE(&config[0xc3c]);
-	if (!(c & 0x08)) {
-		WRITE_BYTE(&config[0xc3c], c);	/* Base Address enable register */
-	}
-	WRITE_BYTE(&config[LOW_BOOTCS_DREG], 0x00);
-	WRITE_BYTE(&config[HI_BOOTCS_DREG], 0xFF);
-	DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-/*
- *  Read only the last 64 bytes of manufacturing data
- */
-	memset(data, '\0', len);
-	flash = DIVA_OS_MEM_ATTACH_PROM(&a->xdi_adapter);
-	for (i = 0; i < len; i++) {
-		data[i] = READ_BYTE(&flash[0x8000 - len + i]);
-	}
-	DIVA_OS_MEM_DETACH_PROM(&a->xdi_adapter, flash);
-
-	config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
-	WRITE_BYTE(&config[LOW_BOOTCS_DREG], 0xFC);	/* Disable FLASH EPROM access */
-	WRITE_BYTE(&config[HI_BOOTCS_DREG], 0xFF);
-	DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
-	if (memcmp(&data[48], "DIVAserverPR", 12)) {
-#if !defined(DIVA_PRI_NO_PCI_BIOS_WORKAROUND)	/* { */
-		word cmd = 0, cmd_org;
-		void *addr;
-		dword addr1, addr3, addr4;
-		byte Bus, Slot;
-		void *hdev;
-		addr4 = a->resources.pci.bar[4];
-		addr3 = a->resources.pci.bar[3];	/* flash  */
-		addr1 = a->resources.pci.bar[1];	/* unused */
-
-		DBG_ERR(("A: apply Compaq BIOS workaround"))
-			DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-				 data[0], data[1], data[2], data[3],
-				 data[4], data[5], data[6], data[7]))
-
-			Bus = a->resources.pci.bus;
-		Slot = a->resources.pci.func;
-		hdev = a->resources.pci.hdev;
-		PCIread(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-		PCIwrite(Bus, Slot, 0x04, &cmd, sizeof(cmd), hdev);
-
-		PCIwrite(Bus, Slot, 0x14, &addr4, sizeof(addr4), hdev);
-		PCIwrite(Bus, Slot, 0x20, &addr1, sizeof(addr1), hdev);
-
-		PCIwrite(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-
-		addr = a->resources.pci.addr[1];
-		a->resources.pci.addr[1] = a->resources.pci.addr[4];
-		a->resources.pci.addr[4] = addr;
-
-		addr1 = a->resources.pci.bar[1];
-		a->resources.pci.bar[1] = a->resources.pci.bar[4];
-		a->resources.pci.bar[4] = addr1;
-
-		/*
-		  Try to read Flash again
-		*/
-		len = sizeof(data);
-
-		config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
-		if (!(config[0xc3c] & 0x08)) {
-			config[0xc3c] |= 0x08;	/* Base Address enable register */
-		}
-		config[LOW_BOOTCS_DREG] = 0x00;
-		config[HI_BOOTCS_DREG] = 0xFF;
-		DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
-		memset(data, '\0', len);
-		flash = DIVA_OS_MEM_ATTACH_PROM(&a->xdi_adapter);
-		for (i = 0; i < len; i++) {
-			data[i] = flash[0x8000 - len + i];
-		}
-		DIVA_OS_MEM_DETACH_PROM(&a->xdi_adapter, flash);
-		config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
-		config[LOW_BOOTCS_DREG] = 0xFC;
-		config[HI_BOOTCS_DREG] = 0xFF;
-		DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
-		if (memcmp(&data[48], "DIVAserverPR", 12)) {
-			DBG_ERR(("A: failed to read serial number"))
-				DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-					 data[0], data[1], data[2], data[3],
-					 data[4], data[5], data[6], data[7]))
-				return (-1);
-		}
-#else				/* } { */
-		DBG_ERR(("A: failed to read DIVA signature word"))
-			DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-				 data[0], data[1], data[2], data[3],
-				 data[4], data[5], data[6], data[7]))
-			DBG_LOG(("%02x:%02x:%02x:%02x", data[47], data[46],
-				 data[45], data[44]))
-#endif				/* } */
-			}
-
-	a->xdi_adapter.serialNo =
-		(data[47] << 24) | (data[46] << 16) | (data[45] << 8) |
-		data[44];
-	if (!a->xdi_adapter.serialNo
-	    || (a->xdi_adapter.serialNo == 0xffffffff)) {
-		a->xdi_adapter.serialNo = 0;
-		DBG_ERR(("A: failed to read serial number"))
-			return (-1);
-	}
-
-	DBG_LOG(("Serial No.          : %ld", a->xdi_adapter.serialNo))
-		DBG_TRC(("Board Revision      : %d.%02d", (int) data[41],
-			 (int) data[40]))
-		DBG_TRC(("PLD revision        : %d.%02d", (int) data[33],
-			 (int) data[32]))
-		DBG_TRC(("Boot loader version : %d.%02d", (int) data[37],
-			 (int) data[36]))
-
-		DBG_TRC(("Manufacturing Date  : %d/%02d/%02d  (yyyy/mm/dd)",
-			 (int) ((data[28] > 90) ? 1900 : 2000) +
-			 (int) data[28], (int) data[29], (int) data[30]))
-
-		return (0);
-}
-
-void diva_os_prepare_pri2_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-void diva_os_prepare_pri_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-/*
-**  Checks presence of DSP on board
-*/
-static int
-dsp_check_presence(volatile byte __iomem *addr, volatile byte __iomem *data, int dsp)
-{
-	word pattern;
-
-	WRITE_WORD(addr, 0x4000);
-	WRITE_WORD(data, DSP_SIGNATURE_PROBE_WORD);
-
-	WRITE_WORD(addr, 0x4000);
-	pattern = READ_WORD(data);
-
-	if (pattern != DSP_SIGNATURE_PROBE_WORD) {
-		DBG_TRC(("W: DSP[%d] %04x(is) != %04x(should)",
-			 dsp, pattern, DSP_SIGNATURE_PROBE_WORD))
-			return (-1);
-	}
-
-	WRITE_WORD(addr, 0x4000);
-	WRITE_WORD(data, ~DSP_SIGNATURE_PROBE_WORD);
-
-	WRITE_WORD(addr, 0x4000);
-	pattern = READ_WORD(data);
-
-	if (pattern != (word)~DSP_SIGNATURE_PROBE_WORD) {
-		DBG_ERR(("A: DSP[%d] %04x(is) != %04x(should)",
-			 dsp, pattern, (word)~DSP_SIGNATURE_PROBE_WORD))
-			return (-2);
-	}
-
-	DBG_TRC(("DSP[%d] present", dsp))
-
-		return (0);
-}
-
-
-/*
-**  Check if DSP's are present and operating
-**  Information about detected DSP's is returned as bit mask
-**  Bit 0  - DSP1
-**  ...
-**  ...
-**  ...
-**  Bit 29 - DSP30
-*/
-static dword diva_pri_detect_dsps(diva_os_xdi_adapter_t *a)
-{
-	byte __iomem *base;
-	byte __iomem *p;
-	dword ret = 0;
-	dword row_offset[7] = {
-		0x00000000,
-		0x00000800,	/* 1 - ROW 1 */
-		0x00000840,	/* 2 - ROW 2 */
-		0x00001000,	/* 3 - ROW 3 */
-		0x00001040,	/* 4 - ROW 4 */
-		0x00000000	/* 5 - ROW 0 */
-	};
-
-	byte __iomem *dsp_addr_port;
-	byte __iomem *dsp_data_port;
-	byte row_state;
-	int dsp_row = 0, dsp_index, dsp_num;
-
-	if (!a->xdi_adapter.Control || !a->xdi_adapter.reset) {
-		return (0);
-	}
-
-	p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
-	WRITE_BYTE(p, _MP_RISC_RESET | _MP_DSP_RESET);
-	DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-	diva_os_wait(5);
-
-	base = DIVA_OS_MEM_ATTACH_CONTROL(&a->xdi_adapter);
-
-	for (dsp_num = 0; dsp_num < 30; dsp_num++) {
-		dsp_row = dsp_num / 7 + 1;
-		dsp_index = dsp_num % 7;
-
-		dsp_data_port = base;
-		dsp_addr_port = base;
-
-		dsp_data_port += row_offset[dsp_row];
-		dsp_addr_port += row_offset[dsp_row];
-
-		dsp_data_port += (dsp_index * 8);
-		dsp_addr_port += (dsp_index * 8) + 0x80;
-
-		if (!dsp_check_presence
-		    (dsp_addr_port, dsp_data_port, dsp_num + 1)) {
-			ret |= (1 << dsp_num);
-		}
-	}
-	DIVA_OS_MEM_DETACH_CONTROL(&a->xdi_adapter, base);
-
-	p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
-	WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
-	DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-	diva_os_wait(5);
-
-	/*
-	  Verify modules
-	*/
-	for (dsp_row = 0; dsp_row < 4; dsp_row++) {
-		row_state = ((ret >> (dsp_row * 7)) & 0x7F);
-		if (row_state && (row_state != 0x7F)) {
-			for (dsp_index = 0; dsp_index < 7; dsp_index++) {
-				if (!(row_state & (1 << dsp_index))) {
-					DBG_ERR(("A: MODULE[%d]-DSP[%d] failed",
-						 dsp_row + 1,
-						 dsp_index + 1))
-				}
-			}
-		}
-	}
-
-	if (!(ret & 0x10000000)) {
-		DBG_ERR(("A: ON BOARD-DSP[1] failed"))
-	}
-	if (!(ret & 0x20000000)) {
-		DBG_ERR(("A: ON BOARD-DSP[2] failed"))
-	}
-
-	/*
-	  Print module population now
-	*/
-	DBG_LOG(("+-----------------------+"))
-	DBG_LOG(("| DSP MODULE POPULATION |"))
-	DBG_LOG(("+-----------------------+"))
-	DBG_LOG(("|  1  |  2  |  3  |  4  |"))
-	DBG_LOG(("+-----------------------+"))
-	DBG_LOG(("|  %s  |  %s  |  %s  |  %s  |",
-		 ((ret >> (0 * 7)) & 0x7F) ? "Y" : "N",
-		 ((ret >> (1 * 7)) & 0x7F) ? "Y" : "N",
-		 ((ret >> (2 * 7)) & 0x7F) ? "Y" : "N",
-		 ((ret >> (3 * 7)) & 0x7F) ? "Y" : "N"))
-	DBG_LOG(("+-----------------------+"))
-
-	DBG_LOG(("DSP's(present-absent):%08x-%08x", ret,
-		 ~ret & 0x3fffffff))
-
-	return (ret);
-}
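
The bit mask built by diva_pri_detect_dsps() can be decoded with ordinary shifts and masks, reusing the layout documented in the comment before the function: bits 0..27 cover four plug-in modules of 7 DSPs each, bits 28..29 the two on-board DSPs. The helper below is an illustrative sketch, not driver code.

#include <stdio.h>
#include <stdint.h>

static void print_dsp_population(uint32_t mask)
{
	int row, idx;

	for (row = 0; row < 4; row++) {
		uint32_t row_state = (mask >> (row * 7)) & 0x7F;

		printf("MODULE[%d]: %s\n", row + 1,
		       row_state ? "populated" : "empty");
		/* a partially working module reports which DSPs failed */
		if (row_state && row_state != 0x7F)
			for (idx = 0; idx < 7; idx++)
				if (!(row_state & (1u << idx)))
					printf("  DSP[%d] failed\n", idx + 1);
	}
	printf("on-board DSP[1]: %s, DSP[2]: %s\n",
	       (mask & 0x10000000) ? "ok" : "failed",
	       (mask & 0x20000000) ? "ok" : "failed");
}

int main(void)
{
	print_dsp_population(0x3000007F);	/* module 1 plus both on-board DSPs */
	return 0;
}
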
diff --git a/drivers/isdn/hardware/eicon/os_pri.h b/drivers/isdn/hardware/eicon/os_pri.h
deleted file mode 100644
index 0e91855..0000000
--- a/drivers/isdn/hardware/eicon/os_pri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_pri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_PRI_REV_1_H__
-#define __DIVA_OS_PRI_REV_1_H__
-
-int diva_pri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/pc.h b/drivers/isdn/hardware/eicon/pc.h
deleted file mode 100644
index 329c0c2..0000000
--- a/drivers/isdn/hardware/eicon/pc.h
+++ /dev/null
@@ -1,738 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef PC_H_INCLUDED  /* { */
-#define PC_H_INCLUDED
-/*------------------------------------------------------------------*/
-/* buffer definition                                                */
-/*------------------------------------------------------------------*/
-typedef struct {
-	word length;          /* length of data/parameter field           */
-	byte P[270];          /* data/parameter field                     */
-} PBUFFER;
-/*------------------------------------------------------------------*/
-/* dual port ram structure                                          */
-/*------------------------------------------------------------------*/
-struct dual
-{
-	byte Req;             /* request register                         */
-	byte ReqId;           /* request task/entity identification       */
-	byte Rc;              /* return code register                     */
-	byte RcId;            /* return code task/entity identification   */
-	byte Ind;             /* Indication register                      */
-	byte IndId;           /* Indication task/entity identification    */
-	byte IMask;           /* Interrupt Mask Flag                      */
-	byte RNR;             /* Receiver Not Ready (set by PC)           */
-	byte XLock;           /* XBuffer locked Flag                      */
-	byte Int;             /* ISDN-S interrupt                         */
-	byte ReqCh;           /* Channel field for layer-3 Requests       */
-	byte RcCh;            /* Channel field for layer-3 Returncodes    */
-	byte IndCh;           /* Channel field for layer-3 Indications    */
-	byte MInd;            /* more data indication field               */
-	word MLength;         /* more data total packet length            */
-	byte ReadyInt;        /* request field for ready interrupt        */
-	byte SWReg;           /* Software register for special purposes   */
-	byte Reserved[11];    /* reserved space                           */
-	byte InterfaceType;   /* interface type 1=16K interface           */
-	word Signature;       /* ISDN-S adapter Signature (GD)            */
-	PBUFFER XBuffer;      /* Transmit Buffer                          */
-	PBUFFER RBuffer;      /* Receive Buffer                           */
-};
-/*------------------------------------------------------------------*/
-/* SWReg Values (0 means no command)                                */
-/*------------------------------------------------------------------*/
-#define SWREG_DIE_WITH_LEDON  0x01
-#define SWREG_HALT_CPU        0x02 /* Push CPU into a while (1) loop */
-/*------------------------------------------------------------------*/
-/* Id Fields Coding                                                 */
-/*------------------------------------------------------------------*/
-#define ID_MASK 0xe0    /* Mask for the ID field                    */
-#define GL_ERR_ID 0x1f  /* ID for error reporting on global requests*/
-#define DSIG_ID  0x00   /* ID for D-channel signaling               */
-#define NL_ID    0x20   /* ID for network-layer access (B or D)     */
-#define BLLC_ID  0x60   /* ID for B-channel link level access       */
-#define TASK_ID  0x80   /* ID for dynamic user tasks                */
-#define TIMER_ID 0xa0   /* ID for timer task                        */
-#define TEL_ID   0xc0   /* ID for telephone support                 */
-#define MAN_ID   0xe0   /* ID for management                        */
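
As a quick orientation for the Id coding above: ID_MASK selects the entity class in the upper three bits, and 0x1f is reserved for error reporting on global requests. The classifier below is a hedged sketch for illustration only; id_class() is a made-up name, not part of this header's interface.

#include <stdio.h>

#define ID_MASK   0xe0
#define GL_ERR_ID 0x1f
#define DSIG_ID   0x00
#define NL_ID     0x20
#define BLLC_ID   0x60
#define MAN_ID    0xe0

static const char *id_class(unsigned char id)
{
	if (id == GL_ERR_ID)		/* documented as the global-request error id */
		return "error on global request";
	switch (id & ID_MASK) {		/* upper three bits select the entity class */
	case DSIG_ID: return "D-channel signaling";
	case NL_ID:   return "network layer";
	case BLLC_ID: return "B-channel link level";
	case MAN_ID:  return "management";
	default:      return "other entity";
	}
}

int main(void)
{
	printf("Id 0x21 -> %s\n", id_class(0x21));
	printf("Id 0x1f -> %s\n", id_class(0x1f));
	return 0;
}
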
-/*------------------------------------------------------------------*/
-/* ASSIGN and REMOVE requests are the same for all entities         */
-/*------------------------------------------------------------------*/
-#define ASSIGN  0x01
-#define UREMOVE 0xfe /* without return code */
-#define REMOVE  0xff
-/*------------------------------------------------------------------*/
-/* Timer Interrupt Task Interface                                   */
-/*------------------------------------------------------------------*/
-#define ASSIGN_TIM 0x01
-#define REMOVE_TIM 0xff
-/*------------------------------------------------------------------*/
-/* dynamic user task interface                                      */
-/*------------------------------------------------------------------*/
-#define ASSIGN_TSK 0x01
-#define REMOVE_TSK 0xff
-#define LOAD 0xf0
-#define RELOCATE 0xf1
-#define START 0xf2
-#define LOAD2 0xf3
-#define RELOCATE2 0xf4
-/*------------------------------------------------------------------*/
-/* dynamic user task messages                                       */
-/*------------------------------------------------------------------*/
-#define TSK_B2          0x0000
-#define TSK_WAKEUP      0x2000
-#define TSK_TIMER       0x4000
-#define TSK_TSK         0x6000
-#define TSK_PC          0xe000
-/*------------------------------------------------------------------*/
-/* LL management primitives                                         */
-/*------------------------------------------------------------------*/
-#define ASSIGN_LL 1     /* assign logical link                      */
-#define REMOVE_LL 0xff  /* remove logical link                      */
-/*------------------------------------------------------------------*/
-/* LL service primitives                                            */
-/*------------------------------------------------------------------*/
-#define LL_UDATA 1      /* link unit data request/indication        */
-#define LL_ESTABLISH 2  /* link establish request/indication        */
-#define LL_RELEASE 3    /* link release request/indication          */
-#define LL_DATA 4       /* data request/indication                  */
-#define LL_LOCAL 5      /* switch to local operation (COM only)     */
-#define LL_DATA_PEND 5  /* data pending indication (SDLC SHM only)  */
-#define LL_REMOTE 6     /* switch to remote operation (COM only)    */
-#define LL_TEST 8       /* link test request                        */
-#define LL_MDATA 9      /* more data request/indication             */
-#define LL_BUDATA 10    /* broadcast unit data request/indication   */
-#define LL_XID 12       /* XID command request/indication           */
-#define LL_XID_R 13     /* XID response request/indication          */
-/*------------------------------------------------------------------*/
-/* NL service primitives                                            */
-/*------------------------------------------------------------------*/
-#define N_MDATA         1       /* more data to come REQ/IND        */
-#define N_CONNECT       2       /* OSI N-CONNECT REQ/IND            */
-#define N_CONNECT_ACK   3       /* OSI N-CONNECT CON/RES            */
-#define N_DISC          4       /* OSI N-DISC REQ/IND               */
-#define N_DISC_ACK      5       /* OSI N-DISC CON/RES               */
-#define N_RESET         6       /* OSI N-RESET REQ/IND              */
-#define N_RESET_ACK     7       /* OSI N-RESET CON/RES              */
-#define N_DATA          8       /* OSI N-DATA REQ/IND               */
-#define N_EDATA         9       /* OSI N-EXPEDITED DATA REQ/IND     */
-#define N_UDATA         10      /* OSI D-UNIT-DATA REQ/IND          */
-#define N_BDATA         11      /* BROADCAST-DATA REQ/IND           */
-#define N_DATA_ACK      12      /* data ack ind for D-bit procedure */
-#define N_EDATA_ACK     13      /* data ack ind for INTERRUPT       */
-#define N_XON           15      /* clear RNR state */
-#define N_COMBI_IND     N_XON   /* combined indication              */
-#define N_Q_BIT         0x10    /* Q-bit for req/ind                */
-#define N_M_BIT         0x20    /* M-bit for req/ind                */
-#define N_D_BIT         0x40    /* D-bit for req/ind                */
-/*------------------------------------------------------------------*/
-/* Signaling management primitives                                  */
-/*------------------------------------------------------------------*/
-#define ASSIGN_SIG  1    /* assign signaling task                    */
-#define UREMOVE_SIG 0xfe /* remove signaling task without return code*/
-#define REMOVE_SIG  0xff /* remove signaling task                    */
-/*------------------------------------------------------------------*/
-/* Signaling service primitives                                     */
-/*------------------------------------------------------------------*/
-#define CALL_REQ 1      /* call request                             */
-#define CALL_CON 1      /* call confirmation                        */
-#define CALL_IND 2      /* incoming call connected                  */
-#define LISTEN_REQ 2    /* listen request                           */
-#define HANGUP 3        /* hangup request/indication                */
-#define SUSPEND 4       /* call suspend request/confirm             */
-#define RESUME 5        /* call resume request/confirm              */
-#define SUSPEND_REJ 6   /* suspend rejected indication              */
-#define USER_DATA 8     /* user data for user to user signaling     */
-#define CONGESTION 9    /* network congestion indication            */
-#define INDICATE_REQ 10 /* request to indicate an incoming call     */
-#define INDICATE_IND 10 /* indicates that there is an incoming call */
-#define CALL_RES 11     /* accept an incoming call                  */
-#define CALL_ALERT 12   /* send ALERT for incoming call             */
-#define INFO_REQ 13     /* INFO request                             */
-#define INFO_IND 13     /* INFO indication                          */
-#define REJECT 14       /* reject an incoming call                  */
-#define RESOURCES 15    /* reserve B-Channel hardware resources     */
-#define HW_CTRL 16      /* B-Channel hardware IOCTL req/ind         */
-#define TEL_CTRL 16     /* Telephone control request/indication     */
-#define STATUS_REQ 17   /* Request D-State (returned in INFO_IND)   */
-#define FAC_REG_REQ 18  /* 1TR6 connection independent fac reg      */
-#define FAC_REG_ACK 19  /* 1TR6 fac registration acknowledge        */
-#define FAC_REG_REJ 20  /* 1TR6 fac registration reject             */
-#define CALL_COMPLETE 21/* send a CALL_PROC for incoming call       */
-#define SW_CTRL 22      /* extended software features               */
-#define REGISTER_REQ 23 /* Q.931 connection independent reg req     */
-#define REGISTER_IND 24 /* Q.931 connection independent reg ind     */
-#define FACILITY_REQ 25 /* Q.931 connection independent fac req     */
-#define FACILITY_IND 26 /* Q.931 connection independent fac ind     */
-#define NCR_INFO_REQ 27 /* INFO_REQ with NULL CR                    */
-#define GCR_MIM_REQ 28  /* MANAGEMENT_INFO_REQ with global CR       */
-#define SIG_CTRL    29  /* Control for Signalling Hardware          */
-#define DSP_CTRL    30  /* Control for DSPs                         */
-#define LAW_REQ      31 /* Law config request for (returns info_i)  */
-#define SPID_CTRL    32 /* Request/indication SPID related          */
-#define NCR_FACILITY 33 /* Request/indication with NULL/DUMMY CR    */
-#define CALL_HOLD    34 /* Request/indication to hold a CALL        */
-#define CALL_RETRIEVE 35 /* Request/indication to retrieve a CALL   */
-#define CALL_HOLD_ACK 36 /* OK of                hold a CALL        */
-#define CALL_RETRIEVE_ACK 37 /* OK of             retrieve a CALL   */
-#define CALL_HOLD_REJ 38 /* Reject of            hold a CALL        */
-#define CALL_RETRIEVE_REJ 39 /* Reject of         retrieve a call   */
-#define GCR_RESTART   40 /* Send/Receive Restart message            */
-#define S_SERVICE     41 /* Send/Receive Supplementary Service      */
-#define S_SERVICE_REJ 42 /* Reject Supplementary Service indication */
-#define S_SUPPORTED   43 /* Req/Ind to get Supported Services       */
-#define STATUS_ENQ    44 /* Req to send the D-ch request if !state0 */
-#define CALL_GUARD    45 /* Req/Ind to use the FLAGS_CALL_OUTCHECK  */
-#define CALL_GUARD_HP 46 /* Call Guard function to reject a call    */
-#define CALL_GUARD_IF 47 /* Call Guard function, inform the appl    */
-#define SSEXT_REQ     48 /* Supplem.Serv./QSIG specific request     */
-#define SSEXT_IND     49 /* Supplem.Serv./QSIG specific indication  */
-/* reserved commands for the US protocols */
-#define INT_3PTY_NIND 50 /* US       specific indication            */
-#define INT_CF_NIND   51 /* US       specific indication            */
-#define INT_3PTY_DROP 52 /* US       specific indication            */
-#define INT_MOVE_CONF 53 /* US       specific indication            */
-#define INT_MOVE_RC   54 /* US       specific indication            */
-#define INT_MOVE_FLIPPED_CONF 55 /* US specific indication          */
-#define INT_X5NI_OK   56 /* internal transfer OK indication         */
-#define INT_XDMS_START 57 /* internal transfer OK indication        */
-#define INT_XDMS_STOP 58 /* internal transfer finish indication     */
-#define INT_XDMS_STOP2 59 /* internal transfer send FA              */
-#define INT_CUSTCONF_REJ 60 /* internal conference reject           */
-#define INT_CUSTXFER 61 /* internal transfer request                */
-#define INT_CUSTX_NIND 62 /* internal transfer ack                  */
-#define INT_CUSTXREJ_NIND 63 /* internal transfer rej               */
-#define INT_X5NI_CF_XFER  64 /* internal transfer OK indication     */
-#define VSWITCH_REQ 65        /* communication between protocol and */
-#define VSWITCH_IND 66        /* capifunctions for D-CH-switching   */
-#define MWI_POLL 67     /* Message Waiting Status Request fkt */
-#define CALL_PEND_NOTIFY 68 /* notify capi to set new listen        */
-#define DO_NOTHING 69       /* don't do anything if you get this    */
-#define INT_CT_REJ 70       /* ECT rejected internal command        */
-#define CALL_HOLD_COMPLETE 71 /* In NT Mode indicate hold complete  */
-#define CALL_RETRIEVE_COMPLETE 72 /* In NT Mode indicate retrieve complete  */
-/*------------------------------------------------------------------*/
-/* management service primitives                                    */
-/*------------------------------------------------------------------*/
-#define MAN_READ        2
-#define MAN_WRITE       3
-#define MAN_EXECUTE     4
-#define MAN_EVENT_ON    5
-#define MAN_EVENT_OFF   6
-#define MAN_LOCK        7
-#define MAN_UNLOCK      8
-#define MAN_INFO_IND    2
-#define MAN_EVENT_IND   3
-#define MAN_TRACE_IND   4
-#define MAN_COMBI_IND   9
-#define MAN_ESC         0x80
-/*------------------------------------------------------------------*/
-/* return code coding                                               */
-/*------------------------------------------------------------------*/
-#define UNKNOWN_COMMAND         0x01    /* unknown command          */
-#define WRONG_COMMAND           0x02    /* wrong command            */
-#define WRONG_ID                0x03    /* unknown task/entity id   */
-#define WRONG_CH                0x04    /* wrong channel            */
-#define UNKNOWN_IE              0x05    /* unknown information el.  */
-#define WRONG_IE                0x06    /* wrong information el.    */
-#define OUT_OF_RESOURCES        0x07    /* ISDN-S card out of res.  */
-#define ISDN_GUARD_REJ          0x09    /* ISDN-Guard SuppServ rej  */
-#define N_FLOW_CONTROL          0x10    /* Flow-Control, retry      */
-#define ASSIGN_RC               0xe0    /* ASSIGN acknowledgement   */
-#define ASSIGN_OK               0xef    /* ASSIGN OK                */
-#define OK_FC                   0xfc    /* Flow-Control RC          */
-#define READY_INT               0xfd    /* Ready interrupt          */
-#define TIMER_INT               0xfe    /* timer interrupt          */
-#define OK                      0xff    /* command accepted         */
-/*------------------------------------------------------------------*/
-/* information elements                                             */
-/*------------------------------------------------------------------*/
-#define SHIFT 0x90              /* codeset shift                    */
-#define MORE 0xa0               /* more data                        */
-#define SDNCMPL 0xa1            /* sending complete                 */
-#define CL 0xb0                 /* congestion level                 */
-/* codeset 0                                                */
-#define SMSG 0x00               /* segmented message                */
-#define BC  0x04                /* Bearer Capability                */
-#define CAU 0x08                /* cause                            */
-#define CAD 0x0c                /* Connected address                */
-#define CAI 0x10                /* call identity                    */
-#define CHI 0x18                /* channel identification           */
-#define LLI 0x19                /* logical link id                  */
-#define CHA 0x1a                /* charge advice                    */
-#define FTY 0x1c                /* Facility                         */
-#define DT  0x29                /* ETSI date/time                   */
-#define KEY 0x2c                /* keypad information element       */
-#define UID 0x2d                /* User id information element      */
-#define DSP 0x28                /* display                          */
-#define SIG 0x34                /* signalling hardware control      */
-#define OAD 0x6c                /* origination address              */
-#define OSA 0x6d                /* origination sub-address          */
-#define CPN 0x70                /* called party number              */
-#define DSA 0x71                /* destination sub-address          */
-#define RDX 0x73                /* redirecting number extended      */
-#define RDN 0x74                /* redirecting number               */
-#define RIN 0x76                /* redirection number               */
-#define IUP 0x76                /* VN6 rerouter->PCS (codeset 6)    */
-#define IPU 0x77                /* VN6 PCS->rerouter (codeset 6)    */
-#define RI  0x79                /* restart indicator                */
-#define MIE 0x7a                /* management info element          */
-#define LLC 0x7c                /* low layer compatibility          */
-#define HLC 0x7d                /* high layer compatibility         */
-#define UUI 0x7e                /* user user information            */
-#define ESC 0x7f                /* escape extension                 */
-#define DLC 0x20                /* data link layer configuration    */
-#define NLC 0x21                /* network layer configuration      */
-#define REDIRECT_IE     0x22    /* redirection request/indication data */
-#define REDIRECT_NET_IE 0x23    /* redirection network override data   */
-/* codeset 6                                                */
-#define SIN 0x01                /* service indicator                */
-#define CIF 0x02                /* charging information             */
-#define DATE 0x03               /* date                             */
-#define CPS 0x07                /* called party status              */
-/*------------------------------------------------------------------*/
-/* ESC information elements                                         */
-/*------------------------------------------------------------------*/
-#define MSGTYPEIE        0x7a   /* Messagetype info element         */
-#define CRIE             0x7b   /* INFO info element                */
-#define CODESET6IE       0xec   /* Tunnel for Codeset 6 IEs         */
-#define VSWITCHIE        0xed   /* VSwitch info element             */
-#define SSEXTIE          0xee   /* Supplem. Service info element    */
-#define PROFILEIE        0xef   /* Profile info element             */
-/*------------------------------------------------------------------*/
-/* TEL_CTRL contents                                                */
-/*------------------------------------------------------------------*/
-#define RING_ON         0x01
-#define RING_OFF        0x02
-#define HANDS_FREE_ON   0x03
-#define HANDS_FREE_OFF  0x04
-#define ON_HOOK         0x80
-#define OFF_HOOK        0x90
-/* operation values used by ETSI supplementary services */
-#define THREE_PTY_BEGIN           0x04
-#define THREE_PTY_END             0x05
-#define ECT_EXECUTE               0x06
-#define ACTIVATION_DIVERSION      0x07
-#define DEACTIVATION_DIVERSION    0x08
-#define CALL_DEFLECTION           0x0D
-#define INTERROGATION_DIVERSION   0x0B
-#define INTERROGATION_SERV_USR_NR 0x11
-#define ACTIVATION_MWI            0x20
-#define DEACTIVATION_MWI          0x21
-#define MWI_INDICATION            0x22
-#define MWI_RESPONSE              0x23
-#define CONF_BEGIN                0x28
-#define CONF_ADD                  0x29
-#define CONF_SPLIT                0x2a
-#define CONF_DROP                 0x2b
-#define CONF_ISOLATE              0x2c
-#define CONF_REATTACH             0x2d
-#define CONF_PARTYDISC            0x2e
-#define CCBS_INFO_RETAIN          0x2f
-#define CCBS_ERASECALLLINKAGEID   0x30
-#define CCBS_STOP_ALERTING        0x31
-#define CCBS_REQUEST              0x32
-#define CCBS_DEACTIVATE           0x33
-#define CCBS_INTERROGATE          0x34
-#define CCBS_STATUS               0x35
-#define CCBS_ERASE                0x36
-#define CCBS_B_FREE               0x37
-#define CCNR_INFO_RETAIN          0x38
-#define CCBS_REMOTE_USER_FREE     0x39
-#define CCNR_REQUEST              0x3a
-#define CCNR_INTERROGATE          0x3b
-#define GET_SUPPORTED_SERVICES    0xff
-#define DIVERSION_PROCEDURE_CFU     0x70
-#define DIVERSION_PROCEDURE_CFB     0x71
-#define DIVERSION_PROCEDURE_CFNR    0x72
-#define DIVERSION_DEACTIVATION_CFU  0x80
-#define DIVERSION_DEACTIVATION_CFB  0x81
-#define DIVERSION_DEACTIVATION_CFNR 0x82
-#define DIVERSION_INTERROGATE_NUM   0x11
-#define DIVERSION_INTERROGATE_CFU   0x60
-#define DIVERSION_INTERROGATE_CFB   0x61
-#define DIVERSION_INTERROGATE_CFNR  0x62
-/* Service Masks */
-#define SMASK_HOLD_RETRIEVE        0x00000001
-#define SMASK_TERMINAL_PORTABILITY 0x00000002
-#define SMASK_ECT                  0x00000004
-#define SMASK_3PTY                 0x00000008
-#define SMASK_CALL_FORWARDING      0x00000010
-#define SMASK_CALL_DEFLECTION      0x00000020
-#define SMASK_MCID                 0x00000040
-#define SMASK_CCBS                 0x00000080
-#define SMASK_MWI                  0x00000100
-#define SMASK_CCNR                 0x00000200
-#define SMASK_CONF                 0x00000400
-/* ----------------------------------------------
-   Types of transfers used to pass the information
-   in 'struct RC->Reserved2[8]'.
-   The information is transferred as 2 dwords
-   (two 4-byte unsigned values).
-   The first dword is the transfer type, so up to
-   2^32-1 different messages can be distinguished.
-   The interpretation of the second dword depends
-   on the transfer type.
-   ---------------------------------------------- */
-#define DIVA_RC_TYPE_NONE              0x00000000
-#define DIVA_RC_TYPE_REMOVE_COMPLETE   0x00000008
-#define DIVA_RC_TYPE_STREAM_PTR        0x00000009
-#define DIVA_RC_TYPE_CMA_PTR           0x0000000a
-#define DIVA_RC_TYPE_OK_FC             0x0000000b
-#define DIVA_RC_TYPE_RX_DMA            0x0000000c
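
A hedged sketch of unpacking the two dwords described above from an 8-byte Reserved2 field; the little-endian byte order, the decode_rc_extension() name and the printf harness are assumptions for illustration rather than the driver's actual code path.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DIVA_RC_TYPE_NONE            0x00000000
#define DIVA_RC_TYPE_REMOVE_COMPLETE 0x00000008

static void decode_rc_extension(const uint8_t reserved2[8])
{
	uint32_t type, value;

	memcpy(&type, reserved2, sizeof(type));		/* first dword: transfer type */
	memcpy(&value, reserved2 + 4, sizeof(value));	/* second dword: type dependent */

	switch (type) {
	case DIVA_RC_TYPE_NONE:
		printf("no extended information\n");
		break;
	case DIVA_RC_TYPE_REMOVE_COMPLETE:
		printf("remove complete, value=0x%08x\n", (unsigned)value);
		break;
	default:
		printf("type=0x%08x value=0x%08x\n", (unsigned)type, (unsigned)value);
		break;
	}
}

int main(void)
{
	uint8_t buf[8] = { 0x08, 0, 0, 0, 0x01, 0, 0, 0 };

	decode_rc_extension(buf);	/* assumes little-endian host */
	return 0;
}
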
-/* ------------------------------------------------------
-   IO Control codes for IN BAND SIGNALING
-   ------------------------------------------------------ */
-#define CTRL_L1_SET_SIG_ID        5
-#define CTRL_L1_SET_DAD           6
-#define CTRL_L1_RESOURCES         7
-/* ------------------------------------------------------ */
-/* ------------------------------------------------------
-   Layer 2 types
-   ------------------------------------------------------ */
-#define X75T            1       /* x.75 for ttx                     */
-#define TRF             2       /* transparent with hdlc framing    */
-#define TRF_IN          3       /* transparent with hdlc fr. inc.   */
-#define SDLC            4       /* sdlc, sna layer-2                */
-#define X75             5       /* x.75 for btx                     */
-#define LAPD            6       /* lapd (Q.921)                     */
-#define X25_L2          7       /* x.25 layer-2                     */
-#define V120_L2         8       /* V.120 layer-2 protocol           */
-#define V42_IN          9       /* V.42 layer-2 protocol, incoming */
-#define V42            10       /* V.42 layer-2 protocol            */
-#define MDM_ATP        11       /* AT Parser built in the L2        */
-#define X75_V42BIS     12       /* x.75 with V.42bis                */
-#define RTPL2_IN       13       /* RTP layer-2 protocol, incoming  */
-#define RTPL2          14       /* RTP layer-2 protocol             */
-#define V120_V42BIS    15       /* V.120 asynchronous mode supporting V.42bis compression */
-#define LISTENER       27       /* Layer 2 to listen line */
-#define MTP2           28       /* MTP2 Layer 2 */
-#define PIAFS_CRC      29       /* PIAFS Layer 2 with CRC calculation at L2 */
-/* ------------------------------------------------------
-   PIAFS DLC DEFINITIONS
-   ------------------------------------------------------ */
-#define PIAFS_64K            0x01
-#define PIAFS_VARIABLE_SPEED 0x02
-#define PIAFS_CHINESE_SPEED  0x04
-#define PIAFS_UDATA_ABILITY_ID    0x80
-#define PIAFS_UDATA_ABILITY_DCDON 0x01
-#define PIAFS_UDATA_ABILITY_DDI   0x80
-/*
-  DLC of PIAFS :
-  Byte | 8 7 6 5 4 3 2 1
-  -----+--------------------------------------------------------
-  0 | 0 0 1 0 0 0 0 0  Data Link Configuration
-  1 | X X X X X X X X  Length of IE (at least 15 Bytes)
-  2 | 0 0 0 0 0 0 0 0  max. information field, LOW  byte (not used, fix 73 Bytes)
-  3 | 0 0 0 0 0 0 0 0  max. information field, HIGH byte (not used, fix 73 Bytes)
-  4 | 0 0 0 0 0 0 0 0  address A (not used)
-  5 | 0 0 0 0 0 0 0 0  address B (not used)
-  6 | 0 0 0 0 0 0 0 0  Mode (not used, fix 128)
-  7 | 0 0 0 0 0 0 0 0  Window Size (not used, fix 127)
-  8 | X X X X X X X X  XID Length, Low Byte (at least 7 Bytes)
-  9 | X X X X X X X X  XID Length, High Byte
-  10 | 0 0 0 0 0 C V S  PIAFS Protocol Speed configuration -> Note(1)
-  |                  S = 0 -> Protocol Speed is 32K
-  |                  S = 1 -> Protocol Speed is 64K
-  |                  V = 0 -> Protocol Speed is fixed
-  |                  V = 1 -> Protocol Speed is variable
-  |                  C = 0 -> speed setting according to standard
-  |                  C = 1 -> speed setting for chinese implementation
-  11 | 0 0 0 0 0 0 R T  P0 - V42bis Compression enable/disable, Low Byte
-  |                  T = 0 -> Transmit Direction enable
-  |                  T = 1 -> Transmit Direction disable
-  |                  R = 0 -> Receive  Direction enable
-  |                  R = 1 -> Receive  Direction disable
-  13 | 0 0 0 0 0 0 0 0  P0 - V42bis Compression enable/disable, High Byte
-  14 | X X X X X X X X  P1 - V42bis Dictionary Size, Low Byte
-  15 | X X X X X X X X  P1 - V42bis Dictionary Size, High Byte
-  16 | X X X X X X X X  P2 - V42bis String Length, Low Byte
-  17 | X X X X X X X X  P2 - V42bis String Length, High Byte
-  18 | X X X X X X X X  PIAFS extension length
-  19 | 1 0 0 0 0 0 0 0  PIAFS extension Id (0x80) - UDATA abilities
-  20 | U 0 0 0 0 0 0 D  UDATA abilities -> Note (2)
-  |                  up to now the following Bits are defined:
-  |                  D - signal DCD ON
-  |                  U - use extensive UDATA control communication
-  |                      for DDI test application
-  + Note (1): ----------+------+-----------------------------------------+
-  | PIAFS Protocol      | Bit  |                                         |
-  | Speed configuration |    S | Bit 1 - Protocol Speed                  |
-  |                     |      |         0 - 32K                         |
-  |                     |      |         1 - 64K (default)               |
-  |                     |    V | Bit 2 - Variable Protocol Speed         |
-  |                     |      |         0 - Speed is fix                |
-  |                     |      |         1 - Speed is variable (default) |
-  |                     |      |             OVERWRITES 32k Bit 1        |
-  |                     |    C | Bit 3   0 - Speed Settings according to |
-  |                     |      |             PIAFS specification         |
-  |                     |      |         1 - Speed setting for chinese   |
-  |                     |      |             PIAFS implementation        |
-  |                     |      | Explanation for chinese speed settings: |
-  |                     |      |         if Bit 3 is set the following   |
-  |                     |      |         rules apply:                    |
-  |                     |      |         Bit1=0 Bit2=0: 32k fix          |
-  |                     |      |         Bit1=1 Bit2=0: 64k fix          |
-  |                     |      |         Bit1=0 Bit2=1: PIAFS is trying  |
-  |                     |      |             to negotiate 32k if that is |
-  |                     |      |             not possible it tries to    |
-  |                     |      |             negotiate 64k               |
-  |                     |      |         Bit1=1 Bit2=1: PIAFS is trying  |
-  |                     |      |             to negotiate 64k if that is |
-  |                     |      |             not possible it tries to    |
-  |                     |      |             negotiate 32k               |
-  + Note (2): ----------+------+-----------------------------------------+
-  | PIAFS               | Bit  | this byte defines the usage of UDATA    |
-  | Implementation      |      | control communication                   |
-  | UDATA usage         |    D | Bit 1 - DCD-ON signalling               |
-  |                     |      |         0 - no DCD-ON is signalled      |
-  |                     |      |             (default)                   |
-  |                     |      |         1 - DCD-ON will be signalled    |
-  |                     |    U | Bit 8 - DDI test application UDATA      |
-  |                     |      |         control communication           |
-  |                     |      |         0 - no UDATA control            |
-  |                     |      |             communication (default)     |
-  |                     |      |             sets as well the DCD-ON     |
-  |                     |      |             signalling                  |
-  |                     |      |         1 - UDATA control communication |
-  |                     |      |             ATTENTION: Do not use this  |
-  |                     |      |                        setting unless   |
-  |                     |      |                        you are really   |
-  |                     |      |                        sure that you    |
-  |                     |      |                        need it and you  |
-  |                     |      |                        know exactly     |
-  |                     |      |                        what you are     |
-  |                     |      |                        doing.           |
-  |                     |      |                        You can easily   |
-  |                     |      |                        disable any      |
-  |                     |      |                        data transfer.   |
-  +---------------------+------+-----------------------------------------+
-*/
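
Note (1) above fully determines byte 10 of the PIAFS DLC, so it can be composed from the PIAFS_* speed bits defined earlier in this header. The helper below is illustrative only; piafs_speed_byte() is a made-up name, not a driver function.

#include <stdio.h>

#define PIAFS_64K            0x01
#define PIAFS_VARIABLE_SPEED 0x02
#define PIAFS_CHINESE_SPEED  0x04

static unsigned char piafs_speed_byte(int speed_64k, int variable, int chinese)
{
	unsigned char b = 0;

	if (speed_64k)
		b |= PIAFS_64K;			/* S: protocol speed is 64k */
	if (variable)
		b |= PIAFS_VARIABLE_SPEED;	/* V: speed is variable, overrides bit 1 */
	if (chinese)
		b |= PIAFS_CHINESE_SPEED;	/* C: chinese negotiation rules */
	return b;
}

int main(void)
{
	/* default per Note (1): 64k, variable speed */
	printf("byte 10 = 0x%02x\n", piafs_speed_byte(1, 1, 0));
	return 0;
}
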
-/* ------------------------------------------------------
-   LISTENER DLC DEFINITIONS
-   ------------------------------------------------------ */
-#define LISTENER_FEATURE_MASK_CUMMULATIVE            0x0001
-/* ------------------------------------------------------
-   LISTENER META-FRAME CODE/PRIMITIVE DEFINITIONS
-   ------------------------------------------------------ */
-#define META_CODE_LL_UDATA_RX 0x01
-#define META_CODE_LL_UDATA_TX 0x02
-#define META_CODE_LL_DATA_RX  0x03
-#define META_CODE_LL_DATA_TX  0x04
-#define META_CODE_LL_MDATA_RX 0x05
-#define META_CODE_LL_MDATA_TX 0x06
-#define META_CODE_EMPTY       0x10
-#define META_CODE_LOST_FRAMES 0x11
-#define META_FLAG_TRUNCATED   0x0001
-/*------------------------------------------------------------------*/
-/* CAPI-like profile to indicate features on LAW_REQ                */
-/*------------------------------------------------------------------*/
-#define GL_INTERNAL_CONTROLLER_SUPPORTED     0x00000001L
-#define GL_EXTERNAL_EQUIPMENT_SUPPORTED      0x00000002L
-#define GL_HANDSET_SUPPORTED                 0x00000004L
-#define GL_DTMF_SUPPORTED                    0x00000008L
-#define GL_SUPPLEMENTARY_SERVICES_SUPPORTED  0x00000010L
-#define GL_CHANNEL_ALLOCATION_SUPPORTED      0x00000020L
-#define GL_BCHANNEL_OPERATION_SUPPORTED      0x00000040L
-#define GL_LINE_INTERCONNECT_SUPPORTED       0x00000080L
-#define B1_HDLC_SUPPORTED                    0x00000001L
-#define B1_TRANSPARENT_SUPPORTED             0x00000002L
-#define B1_V110_ASYNC_SUPPORTED              0x00000004L
-#define B1_V110_SYNC_SUPPORTED               0x00000008L
-#define B1_T30_SUPPORTED                     0x00000010L
-#define B1_HDLC_INVERTED_SUPPORTED           0x00000020L
-#define B1_TRANSPARENT_R_SUPPORTED           0x00000040L
-#define B1_MODEM_ALL_NEGOTIATE_SUPPORTED     0x00000080L
-#define B1_MODEM_ASYNC_SUPPORTED             0x00000100L
-#define B1_MODEM_SYNC_HDLC_SUPPORTED         0x00000200L
-#define B2_X75_SUPPORTED                     0x00000001L
-#define B2_TRANSPARENT_SUPPORTED             0x00000002L
-#define B2_SDLC_SUPPORTED                    0x00000004L
-#define B2_LAPD_SUPPORTED                    0x00000008L
-#define B2_T30_SUPPORTED                     0x00000010L
-#define B2_PPP_SUPPORTED                     0x00000020L
-#define B2_TRANSPARENT_NO_CRC_SUPPORTED      0x00000040L
-#define B2_MODEM_EC_COMPRESSION_SUPPORTED    0x00000080L
-#define B2_X75_V42BIS_SUPPORTED              0x00000100L
-#define B2_V120_ASYNC_SUPPORTED              0x00000200L
-#define B2_V120_ASYNC_V42BIS_SUPPORTED       0x00000400L
-#define B2_V120_BIT_TRANSPARENT_SUPPORTED    0x00000800L
-#define B2_LAPD_FREE_SAPI_SEL_SUPPORTED      0x00001000L
-#define B3_TRANSPARENT_SUPPORTED             0x00000001L
-#define B3_T90NL_SUPPORTED                   0x00000002L
-#define B3_ISO8208_SUPPORTED                 0x00000004L
-#define B3_X25_DCE_SUPPORTED                 0x00000008L
-#define B3_T30_SUPPORTED                     0x00000010L
-#define B3_T30_WITH_EXTENSIONS_SUPPORTED     0x00000020L
-#define B3_RESERVED_SUPPORTED                0x00000040L
-#define B3_MODEM_SUPPORTED                   0x00000080L
-#define MANUFACTURER_FEATURE_SLAVE_CODEC          0x00000001L
-#define MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS   0x00000002L
-#define MANUFACTURER_FEATURE_HARDDTMF             0x00000004L
-#define MANUFACTURER_FEATURE_SOFTDTMF_SEND        0x00000008L
-#define MANUFACTURER_FEATURE_DTMF_PARAMETERS      0x00000010L
-#define MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE     0x00000020L
-#define MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD      0x00000040L
-#define MANUFACTURER_FEATURE_V18                  0x00000080L
-#define MANUFACTURER_FEATURE_MIXER_CH_CH          0x00000100L
-#define MANUFACTURER_FEATURE_MIXER_CH_PC          0x00000200L
-#define MANUFACTURER_FEATURE_MIXER_PC_CH          0x00000400L
-#define MANUFACTURER_FEATURE_MIXER_PC_PC          0x00000800L
-#define MANUFACTURER_FEATURE_ECHO_CANCELLER       0x00001000L
-#define MANUFACTURER_FEATURE_RTP                  0x00002000L
-#define MANUFACTURER_FEATURE_T38                  0x00004000L
-#define MANUFACTURER_FEATURE_TRANSP_DELIVERY_CONF 0x00008000L
-#define MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL  0x00010000L
-#define MANUFACTURER_FEATURE_OOB_CHANNEL          0x00020000L
-#define MANUFACTURER_FEATURE_IN_BAND_CHANNEL      0x00040000L
-#define MANUFACTURER_FEATURE_IN_BAND_FEATURE      0x00080000L
-#define MANUFACTURER_FEATURE_PIAFS                0x00100000L
-#define MANUFACTURER_FEATURE_DTMF_TONE            0x00200000L
-#define MANUFACTURER_FEATURE_FAX_PAPER_FORMATS    0x00400000L
-#define MANUFACTURER_FEATURE_OK_FC_LABEL          0x00800000L
-#define MANUFACTURER_FEATURE_VOWN                 0x01000000L
-#define MANUFACTURER_FEATURE_XCONNECT             0x02000000L
-#define MANUFACTURER_FEATURE_DMACONNECT           0x04000000L
-#define MANUFACTURER_FEATURE_AUDIO_TAP            0x08000000L
-#define MANUFACTURER_FEATURE_FAX_NONSTANDARD      0x10000000L
-#define MANUFACTURER_FEATURE_SS7                  0x20000000L
-#define MANUFACTURER_FEATURE_MADAPTER             0x40000000L
-#define MANUFACTURER_FEATURE_MEASURE              0x80000000L
-#define MANUFACTURER_FEATURE2_LISTENING           0x00000001L
-#define MANUFACTURER_FEATURE2_SS_DIFFCONTPOSSIBLE 0x00000002L
-#define MANUFACTURER_FEATURE2_GENERIC_TONE        0x00000004L
-#define MANUFACTURER_FEATURE2_COLOR_FAX           0x00000008L
-#define MANUFACTURER_FEATURE2_SS_ECT_DIFFCONTPOSSIBLE 0x00000010L
-#define RTP_PRIM_PAYLOAD_PCMU_8000     0
-#define RTP_PRIM_PAYLOAD_1016_8000     1
-#define RTP_PRIM_PAYLOAD_G726_32_8000  2
-#define RTP_PRIM_PAYLOAD_GSM_8000      3
-#define RTP_PRIM_PAYLOAD_G723_8000     4
-#define RTP_PRIM_PAYLOAD_DVI4_8000     5
-#define RTP_PRIM_PAYLOAD_DVI4_16000    6
-#define RTP_PRIM_PAYLOAD_LPC_8000      7
-#define RTP_PRIM_PAYLOAD_PCMA_8000     8
-#define RTP_PRIM_PAYLOAD_G722_16000    9
-#define RTP_PRIM_PAYLOAD_QCELP_8000    12
-#define RTP_PRIM_PAYLOAD_G728_8000     14
-#define RTP_PRIM_PAYLOAD_G729_8000     18
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000   30
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000  31
-#define RTP_ADD_PAYLOAD_BASE           32
-#define RTP_ADD_PAYLOAD_RED            32
-#define RTP_ADD_PAYLOAD_CN_8000        33
-#define RTP_ADD_PAYLOAD_DTMF           34
-#define RTP_PRIM_PAYLOAD_PCMU_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_PCMU_8000)
-#define RTP_PRIM_PAYLOAD_1016_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_1016_8000)
-#define RTP_PRIM_PAYLOAD_G726_32_8000_SUPPORTED  (1L << RTP_PRIM_PAYLOAD_G726_32_8000)
-#define RTP_PRIM_PAYLOAD_GSM_8000_SUPPORTED      (1L << RTP_PRIM_PAYLOAD_GSM_8000)
-#define RTP_PRIM_PAYLOAD_G723_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_G723_8000)
-#define RTP_PRIM_PAYLOAD_DVI4_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_DVI4_8000)
-#define RTP_PRIM_PAYLOAD_DVI4_16000_SUPPORTED    (1L << RTP_PRIM_PAYLOAD_DVI4_16000)
-#define RTP_PRIM_PAYLOAD_LPC_8000_SUPPORTED      (1L << RTP_PRIM_PAYLOAD_LPC_8000)
-#define RTP_PRIM_PAYLOAD_PCMA_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_PCMA_8000)
-#define RTP_PRIM_PAYLOAD_G722_16000_SUPPORTED    (1L << RTP_PRIM_PAYLOAD_G722_16000)
-#define RTP_PRIM_PAYLOAD_QCELP_8000_SUPPORTED    (1L << RTP_PRIM_PAYLOAD_QCELP_8000)
-#define RTP_PRIM_PAYLOAD_G728_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_G728_8000)
-#define RTP_PRIM_PAYLOAD_G729_8000_SUPPORTED     (1L << RTP_PRIM_PAYLOAD_G729_8000)
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000_SUPPORTED   (1L << RTP_PRIM_PAYLOAD_GSM_HR_8000)
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000_SUPPORTED  (1L << RTP_PRIM_PAYLOAD_GSM_EFR_8000)
-#define RTP_ADD_PAYLOAD_RED_SUPPORTED            (1L << (RTP_ADD_PAYLOAD_RED - RTP_ADD_PAYLOAD_BASE))
-#define RTP_ADD_PAYLOAD_CN_8000_SUPPORTED        (1L << (RTP_ADD_PAYLOAD_CN_8000 - RTP_ADD_PAYLOAD_BASE))
-#define RTP_ADD_PAYLOAD_DTMF_SUPPORTED           (1L << (RTP_ADD_PAYLOAD_DTMF - RTP_ADD_PAYLOAD_BASE))
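
The *_SUPPORTED macros above are plain bit masks, so advertising and testing codec support reduces to OR-ing and AND-ing them. A minimal sketch; the `supported` capability word is illustrative, not a defined structure field.

#include <stdio.h>

#define RTP_PRIM_PAYLOAD_PCMU_8000  0
#define RTP_PRIM_PAYLOAD_PCMA_8000  8
#define RTP_PRIM_PAYLOAD_G729_8000  18

#define RTP_PRIM_PAYLOAD_PCMU_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_PCMU_8000)
#define RTP_PRIM_PAYLOAD_PCMA_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_PCMA_8000)
#define RTP_PRIM_PAYLOAD_G729_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G729_8000)

int main(void)
{
	long supported = RTP_PRIM_PAYLOAD_PCMU_8000_SUPPORTED |
			 RTP_PRIM_PAYLOAD_PCMA_8000_SUPPORTED;

	printf("G.711 u-law: %s\n",
	       (supported & RTP_PRIM_PAYLOAD_PCMU_8000_SUPPORTED) ? "yes" : "no");
	printf("G.729:       %s\n",
	       (supported & RTP_PRIM_PAYLOAD_G729_8000_SUPPORTED) ? "yes" : "no");
	return 0;
}
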
-/* virtual switching definitions */
-#define VSJOIN         1
-#define VSTRANSPORT    2
-#define VSGETPARAMS    3
-#define VSCAD          1
-#define VSRXCPNAME     2
-#define VSCALLSTAT     3
-#define VSINVOKEID     4
-#define VSCLMRKS       5
-#define VSTBCTIDENT    6
-#define VSETSILINKID   7
-#define VSSAMECONTROLLER 8
-/* Errorcodes for VSETSILINKID begin */
-#define VSETSILINKIDRRWC      1
-#define VSETSILINKIDREJECT    2
-#define VSETSILINKIDTIMEOUT   3
-#define VSETSILINKIDFAILCOUNT 4
-#define VSETSILINKIDERROR     5
-/* Errorcodes for VSETSILINKID end */
-/* -----------------------------------------------------------**
-** The PROTOCOL_FEATURE_STRING in feature.h (included         **
-** in prstart.sx and astart.sx) defines capabilities and      **
-** features of the actual protocol code. It's used as a bit   **
-** mask.                                                      **
-** The following Bits are defined:                            **
-** -----------------------------------------------------------*/
-#define PROTCAP_TELINDUS  0x0001  /* Telindus Variant of protocol code   */
-#define PROTCAP_MAN_IF    0x0002  /* Management interface implemented    */
-#define PROTCAP_V_42      0x0004  /* V42 implemented                     */
-#define PROTCAP_V90D      0x0008  /* V.90D (implies up to 384k DSP code) */
-#define PROTCAP_EXTD_FAX  0x0010  /* Extended FAX (ECM, 2D, T6, Polling) */
-#define PROTCAP_EXTD_RXFC 0x0020  /* RxFC (Extd Flow Control), OOB Chnl  */
-#define PROTCAP_VOIP      0x0040  /* VoIP (implies up to 512k DSP code)  */
-#define PROTCAP_CMA_ALLPR 0x0080  /* CMA support for all NL primitives   */
-#define PROTCAP_FREE8     0x0100  /* not used                            */
-#define PROTCAP_FREE9     0x0200  /* not used                            */
-#define PROTCAP_FREE10    0x0400  /* not used                            */
-#define PROTCAP_FREE11    0x0800  /* not used                            */
-#define PROTCAP_FREE12    0x1000  /* not used                            */
-#define PROTCAP_FREE13    0x2000  /* not used                            */
-#define PROTCAP_FREE14    0x4000  /* not used                            */
-#define PROTCAP_EXTENSION 0x8000  /* used for future extensions          */
-/* -----------------------------------------------------------* */
-/* Onhook data transmission ETS30065901 */
-/* Message Type */
-/*#define RESERVED4                 0x4*/
-#define CALL_SETUP                0x80
-#define MESSAGE_WAITING_INDICATOR 0x82
-/*#define RESERVED84                0x84*/
-/*#define RESERVED85                0x85*/
-#define ADVICE_OF_CHARGE          0x86
-/*1111 0001
-  to
-  1111 1111
-  F1H - Reserved for network operator use
-  to
-  FFH*/
-/* Parameter Types */
-#define DATE_AND_TIME                                           1
-#define CLI_PARAMETER_TYPE                                      2
-#define CALLED_DIRECTORY_NUMBER_PARAMETER_TYPE                  3
-#define REASON_FOR_ABSENCE_OF_CLI_PARAMETER_TYPE                4
-#define NAME_PARAMETER_TYPE                                     7
-#define REASON_FOR_ABSENCE_OF_CALLING_PARTY_NAME_PARAMETER_TYPE 8
-#define VISUAL_INDICATOR_PARAMETER_TYPE                         0xb
-#define COMPLEMENTARY_CLI_PARAMETER_TYPE                        0x10
-#define CALL_TYPE_PARAMETER_TYPE                                0x11
-#define FIRST_CALLED_LINE_DIRECTORY_NUMBER_PARAMETER_TYPE       0x12
-#define NETWORK_MESSAGE_SYSTEM_STATUS_PARAMETER_TYPE            0x13
-#define FORWARDED_CALL_TYPE_PARAMETER_TYPE                      0x15
-#define TYPE_OF_CALLING_USER_PARAMETER_TYPE                     0x16
-#define REDIRECTING_NUMBER_PARAMETER_TYPE                       0x1a
-#define EXTENSION_FOR_NETWORK_OPERATOR_USE_PARAMETER_TYPE       0xe0
-/* -----------------------------------------------------------* */
-#else
-#endif /* PC_H_INCLUDED  } */
diff --git a/drivers/isdn/hardware/eicon/pc_init.h b/drivers/isdn/hardware/eicon/pc_init.h
deleted file mode 100644
index d1d0086..0000000
--- a/drivers/isdn/hardware/eicon/pc_init.h
+++ /dev/null
@@ -1,267 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef PC_INIT_H_
-#define PC_INIT_H_
-/*------------------------------------------------------------------*/
-/*
-  Initialisation parameters for the card
-  0x0008 <byte> TEI
-  0x0009 <byte> NT2 flag
-  0x000a <byte> Default DID length
-  0x000b <byte> Disable watchdog flag
-  0x000c <byte> Permanent connection flag
-  0x000d <byte> Bit 3-8: L1 Hunt Group/Tristate
-  0x000d <byte> Bit 1: QSig small CR length if set to 1
-  0x000d <byte> Bit 2: QSig small CHI length if set to 1
-  0x000e <byte> Bit 1-3: Stable L2, 0=OnDemand,1=NoDisc,2=permanent
-  0x000e <byte> Bit 4: NT mode
-  0x000e <byte> Bit 5: QSig Channel ID format
-  0x000e <byte> Bit 6: QSig Call Forwarding Allowed Flag
-  0x000e <byte> Bit 7: Disable AutoSPID Flag
-  0x000f <byte> No order check flag
-  0x0010 <byte> Force companding type:0=default,1=a-law,2=u-law
-  0x0012 <byte> Low channel flag
-  0x0013 <byte> Protocol version
-  0x0014 <byte> CRC4 option:0=default,1=double_frm,2=multi_frm,3=auto
-  0x0015 <byte> Bit 0: NoHscx30, Bit 1: Loopback flag, Bit 2: ForceHscx30
-  0x0016 <byte> DSP info
-  0x0017-0x0019 Serial number
-  0x001a <byte> Card type
-  0x0020 <string> OAD 0
-  0x0040 <string> OSA 0
-  0x0060 <string> SPID 0 (if not T.1)
-  0x0060 <struct> if T.1: Robbed Bit Configuration
-  0x0060          length (8)
-  0x0061          RBS Answer Delay
-  0x0062          RBS Config Bit 3, 4:
-  0  0 -> Wink Start
-  1  0 -> Loop Start
-  0  1 -> Ground Start
-  1  1 -> reserved
-  Bit 5, 6:
-  0  0 -> Pulse Dial -> Rotary
-  1  0 -> DTMF
-  0  1 -> MF
-  1  1 -> reserved
-  0x0063          RBS RX Digit Timeout
-  0x0064          RBS Bearer Capability
-  0x0065-0x0069   RBS Debug Mask
-  0x0080 <string> OAD 1
-  0x00a0 <string> OSA 1
-  0x00c0 <string> SPID 1
-  0x00e0 <w-element list> Additional configuration
-*/
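
The offset table above describes a flat byte layout, so a handful of fixed-offset reads recover the interesting fields. The sketch below is illustrative only: read_init_block(), the view struct, and the assumption that the 3-byte serial number at 0x0017-0x0019 is stored least-significant byte first are not taken from the driver.

#include <stdio.h>
#include <stdint.h>

struct pc_init_view {
	uint8_t  tei;		/* 0x0008 */
	uint8_t  nt2;		/* 0x0009 */
	uint8_t  did_len;	/* 0x000a */
	uint32_t serial;	/* 0x0017-0x0019, 3 bytes (byte order assumed) */
	uint8_t  card_type;	/* 0x001a */
};

static void read_init_block(const uint8_t *blk, struct pc_init_view *v)
{
	v->tei       = blk[0x0008];
	v->nt2       = blk[0x0009];
	v->did_len   = blk[0x000a];
	v->serial    = blk[0x0017] | (blk[0x0018] << 8) | (blk[0x0019] << 16);
	v->card_type = blk[0x001a];
}

int main(void)
{
	uint8_t blk[0x100] = { 0 };
	struct pc_init_view v;

	blk[0x0008] = 0x7f;	/* example TEI */
	read_init_block(blk, &v);
	printf("TEI=%d NT2=%d card_type=%d serial=%u\n",
	       v.tei, v.nt2, v.card_type, (unsigned)v.serial);
	return 0;
}
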
-#define PCINIT_END_OF_LIST                0x00
-#define PCINIT_MODEM_GUARD_TONE           0x01
-#define PCINIT_MODEM_MIN_SPEED            0x02
-#define PCINIT_MODEM_MAX_SPEED            0x03
-#define PCINIT_MODEM_PROTOCOL_OPTIONS     0x04
-#define PCINIT_FAX_OPTIONS                0x05
-#define PCINIT_FAX_MAX_SPEED              0x06
-#define PCINIT_MODEM_OPTIONS              0x07
-#define PCINIT_MODEM_NEGOTIATION_MODE     0x08
-#define PCINIT_MODEM_MODULATIONS_MASK     0x09
-#define PCINIT_MODEM_TRANSMIT_LEVEL       0x0a
-#define PCINIT_FAX_DISABLED_RESOLUTIONS   0x0b
-#define PCINIT_FAX_MAX_RECORDING_WIDTH    0x0c
-#define PCINIT_FAX_MAX_RECORDING_LENGTH   0x0d
-#define PCINIT_FAX_MIN_SCANLINE_TIME      0x0e
-#define PCINIT_US_EKTS_CACH_HANDLES       0x0f
-#define PCINIT_US_EKTS_BEGIN_CONF         0x10
-#define PCINIT_US_EKTS_DROP_CONF          0x11
-#define PCINIT_US_EKTS_CALL_TRANSFER      0x12
-#define PCINIT_RINGERTONE_OPTION          0x13
-#define PCINIT_CARD_ADDRESS               0x14
-#define PCINIT_FPGA_FEATURES              0x15
-#define PCINIT_US_EKTS_MWI                0x16
-#define PCINIT_MODEM_SPEAKER_CONTROL      0x17
-#define PCINIT_MODEM_SPEAKER_VOLUME       0x18
-#define PCINIT_MODEM_CARRIER_WAIT_TIME    0x19
-#define PCINIT_MODEM_CARRIER_LOSS_TIME    0x1a
-#define PCINIT_UNCHAN_B_MASK              0x1b
-#define PCINIT_PART68_LIMITER             0x1c
-#define PCINIT_XDI_FEATURES               0x1d
-#define PCINIT_QSIG_DIALECT               0x1e
-#define PCINIT_DISABLE_AUTOSPID_FLAG      0x1f
-#define PCINIT_FORCE_VOICE_MAIL_ALERT     0x20
-#define PCINIT_PIAFS_TURNAROUND_FRAMES    0x21
-#define PCINIT_L2_COUNT                   0x22
-#define PCINIT_QSIG_FEATURES              0x23
-#define PCINIT_NO_SIGNALLING              0x24
-#define PCINIT_CARD_SN                    0x25
-#define PCINIT_CARD_PORT                  0x26
-#define PCINIT_ALERTTO                    0x27
-#define PCINIT_MODEM_EYE_SETUP            0x28
-#define PCINIT_FAX_V34_OPTIONS            0x29
-/*------------------------------------------------------------------*/
-#define PCINIT_MODEM_GUARD_TONE_NONE            0x00
-#define PCINIT_MODEM_GUARD_TONE_550HZ           0x01
-#define PCINIT_MODEM_GUARD_TONE_1800HZ          0x02
-#define PCINIT_MODEM_GUARD_TONE_CHOICES         0x03
-#define PCINIT_MODEMPROT_DISABLE_V42_V42BIS     0x0001
-#define PCINIT_MODEMPROT_DISABLE_MNP_MNP5       0x0002
-#define PCINIT_MODEMPROT_REQUIRE_PROTOCOL       0x0004
-#define PCINIT_MODEMPROT_DISABLE_V42_DETECT     0x0008
-#define PCINIT_MODEMPROT_DISABLE_COMPRESSION    0x0010
-#define PCINIT_MODEMPROT_REQUIRE_PROTOCOL_V34UP 0x0020
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_1200    0x0100
-#define PCINIT_MODEMPROT_BUFFER_IN_V42_DETECT   0x0200
-#define PCINIT_MODEMPROT_DISABLE_V42_SREJ       0x0400
-#define PCINIT_MODEMPROT_DISABLE_MNP3           0x0800
-#define PCINIT_MODEMPROT_DISABLE_MNP4           0x1000
-#define PCINIT_MODEMPROT_DISABLE_MNP10          0x2000
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_V22BIS  0x4000
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_V32BIS  0x8000
-#define PCINIT_MODEMCONFIG_LEASED_LINE_MODE     0x00000001L
-#define PCINIT_MODEMCONFIG_4_WIRE_OPERATION     0x00000002L
-#define PCINIT_MODEMCONFIG_DISABLE_BUSY_DETECT  0x00000004L
-#define PCINIT_MODEMCONFIG_DISABLE_CALLING_TONE 0x00000008L
-#define PCINIT_MODEMCONFIG_DISABLE_ANSWER_TONE  0x00000010L
-#define PCINIT_MODEMCONFIG_ENABLE_DIAL_TONE_DET 0x00000020L
-#define PCINIT_MODEMCONFIG_USE_POTS_INTERFACE   0x00000040L
-#define PCINIT_MODEMCONFIG_FORCE_RAY_TAYLOR_FAX 0x00000080L
-#define PCINIT_MODEMCONFIG_DISABLE_RETRAIN      0x00000100L
-#define PCINIT_MODEMCONFIG_DISABLE_STEPDOWN     0x00000200L
-#define PCINIT_MODEMCONFIG_DISABLE_SPLIT_SPEED  0x00000400L
-#define PCINIT_MODEMCONFIG_DISABLE_TRELLIS      0x00000800L
-#define PCINIT_MODEMCONFIG_ALLOW_RDL_TEST_LOOP  0x00001000L
-#define PCINIT_MODEMCONFIG_DISABLE_STEPUP       0x00002000L
-#define PCINIT_MODEMCONFIG_DISABLE_FLUSH_TIMER  0x00004000L
-#define PCINIT_MODEMCONFIG_REVERSE_DIRECTION    0x00008000L
-#define PCINIT_MODEMCONFIG_DISABLE_TX_REDUCTION 0x00010000L
-#define PCINIT_MODEMCONFIG_DISABLE_PRECODING    0x00020000L
-#define PCINIT_MODEMCONFIG_DISABLE_PREEMPHASIS  0x00040000L
-#define PCINIT_MODEMCONFIG_DISABLE_SHAPING      0x00080000L
-#define PCINIT_MODEMCONFIG_DISABLE_NONLINEAR_EN 0x00100000L
-#define PCINIT_MODEMCONFIG_DISABLE_MANUALREDUCT 0x00200000L
-#define PCINIT_MODEMCONFIG_DISABLE_16_POINT_TRN 0x00400000L
-#define PCINIT_MODEMCONFIG_DISABLE_2400_SYMBOLS 0x01000000L
-#define PCINIT_MODEMCONFIG_DISABLE_2743_SYMBOLS 0x02000000L
-#define PCINIT_MODEMCONFIG_DISABLE_2800_SYMBOLS 0x04000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3000_SYMBOLS 0x08000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3200_SYMBOLS 0x10000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3429_SYMBOLS 0x20000000L
-#define PCINIT_MODEM_NEGOTIATE_HIGHEST          0x00
-#define PCINIT_MODEM_NEGOTIATE_DISABLED         0x01
-#define PCINIT_MODEM_NEGOTIATE_IN_CLASS         0x02
-#define PCINIT_MODEM_NEGOTIATE_V100             0x03
-#define PCINIT_MODEM_NEGOTIATE_V8               0x04
-#define PCINIT_MODEM_NEGOTIATE_V8BIS            0x05
-#define PCINIT_MODEM_NEGOTIATE_CHOICES          0x06
-#define PCINIT_MODEMMODULATION_DISABLE_V21      0x00000001L
-#define PCINIT_MODEMMODULATION_DISABLE_V23      0x00000002L
-#define PCINIT_MODEMMODULATION_DISABLE_V22      0x00000004L
-#define PCINIT_MODEMMODULATION_DISABLE_V22BIS   0x00000008L
-#define PCINIT_MODEMMODULATION_DISABLE_V32      0x00000010L
-#define PCINIT_MODEMMODULATION_DISABLE_V32BIS   0x00000020L
-#define PCINIT_MODEMMODULATION_DISABLE_V34      0x00000040L
-#define PCINIT_MODEMMODULATION_DISABLE_V90      0x00000080L
-#define PCINIT_MODEMMODULATION_DISABLE_BELL103  0x00000100L
-#define PCINIT_MODEMMODULATION_DISABLE_BELL212A 0x00000200L
-#define PCINIT_MODEMMODULATION_DISABLE_VFC      0x00000400L
-#define PCINIT_MODEMMODULATION_DISABLE_K56FLEX  0x00000800L
-#define PCINIT_MODEMMODULATION_DISABLE_X2       0x00001000L
-#define PCINIT_MODEMMODULATION_ENABLE_V29FDX    0x00010000L
-#define PCINIT_MODEMMODULATION_ENABLE_V33       0x00020000L
-#define PCINIT_MODEMMODULATION_ENABLE_V90A      0x00040000L
-#define PCINIT_MODEM_TRANSMIT_LEVEL_CHOICES     0x10
-#define PCINIT_MODEM_SPEAKER_OFF                0x00
-#define PCINIT_MODEM_SPEAKER_DURING_TRAIN       0x01
-#define PCINIT_MODEM_SPEAKER_TIL_CONNECT        0x02
-#define PCINIT_MODEM_SPEAKER_ALWAYS_ON          0x03
-#define PCINIT_MODEM_SPEAKER_CHOICES            0x04
-#define PCINIT_MODEM_SPEAKER_VOLUME_MIN         0x00
-#define PCINIT_MODEM_SPEAKER_VOLUME_LOW         0x01
-#define PCINIT_MODEM_SPEAKER_VOLUME_HIGH        0x02
-#define PCINIT_MODEM_SPEAKER_VOLUME_MAX         0x03
-#define PCINIT_MODEM_SPEAKER_VOLUME_CHOICES     0x04
-/*------------------------------------------------------------------*/
-#define PCINIT_FAXCONFIG_DISABLE_FINE           0x0001
-#define PCINIT_FAXCONFIG_DISABLE_ECM            0x0002
-#define PCINIT_FAXCONFIG_ECM_64_BYTES           0x0004
-#define PCINIT_FAXCONFIG_DISABLE_2D_CODING      0x0008
-#define PCINIT_FAXCONFIG_DISABLE_T6_CODING      0x0010
-#define PCINIT_FAXCONFIG_DISABLE_UNCOMPR        0x0020
-#define PCINIT_FAXCONFIG_REFUSE_POLLING         0x0040
-#define PCINIT_FAXCONFIG_HIDE_TOTAL_PAGES       0x0080
-#define PCINIT_FAXCONFIG_HIDE_ALL_HEADLINE      0x0100
-#define PCINIT_FAXCONFIG_HIDE_PAGE_INFO         0x0180
-#define PCINIT_FAXCONFIG_HEADLINE_OPTIONS_MASK  0x0180
-#define PCINIT_FAXCONFIG_DISABLE_FEATURE_FALLBACK 0x0200
-#define PCINIT_FAXCONFIG_V34FAX_CONTROL_RATE_1200 0x0800
-#define PCINIT_FAXCONFIG_DISABLE_V34FAX         0x1000
-#define PCINIT_FAXCONFIG_DISABLE_R8_0770_OR_200 0x01
-#define PCINIT_FAXCONFIG_DISABLE_R8_1540        0x02
-#define PCINIT_FAXCONFIG_DISABLE_R16_1540_OR_400 0x04
-#define PCINIT_FAXCONFIG_DISABLE_R4_0385_OR_100 0x08
-#define PCINIT_FAXCONFIG_DISABLE_300_300        0x10
-#define PCINIT_FAXCONFIG_DISABLE_INCH_BASED     0x40
-#define PCINIT_FAXCONFIG_DISABLE_METRIC_BASED   0x80
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_A3       0
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_B4       1
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_A4       2
-#define PCINIT_FAXCONFIG_REC_WIDTH_COUNT        3
-#define PCINIT_FAXCONFIG_REC_LENGTH_UNLIMITED   0
-#define PCINIT_FAXCONFIG_REC_LENGTH_ISO_B4      1
-#define PCINIT_FAXCONFIG_REC_LENGTH_ISO_A4      2
-#define PCINIT_FAXCONFIG_REC_LENGTH_COUNT       3
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_00_00_00 0
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_05_05_05 1
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_05_05 2
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_10_10 3
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_10_10 4
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_20_20 5
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_20_20 6
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_40_40 7
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_8    8
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_9    9
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_10   10
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_10_05 11
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_10_05 12
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_20_10 13
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_20_10 14
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_40_20 15
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_COUNT    16
-#define PCINIT_FAXCONFIG_DISABLE_TX_REDUCTION   0x00010000L
-#define PCINIT_FAXCONFIG_DISABLE_PRECODING      0x00020000L
-#define PCINIT_FAXCONFIG_DISABLE_PREEMPHASIS    0x00040000L
-#define PCINIT_FAXCONFIG_DISABLE_SHAPING        0x00080000L
-#define PCINIT_FAXCONFIG_DISABLE_NONLINEAR_EN   0x00100000L
-#define PCINIT_FAXCONFIG_DISABLE_MANUALREDUCT   0x00200000L
-#define PCINIT_FAXCONFIG_DISABLE_16_POINT_TRN   0x00400000L
-#define PCINIT_FAXCONFIG_DISABLE_2400_SYMBOLS   0x01000000L
-#define PCINIT_FAXCONFIG_DISABLE_2743_SYMBOLS   0x02000000L
-#define PCINIT_FAXCONFIG_DISABLE_2800_SYMBOLS   0x04000000L
-#define PCINIT_FAXCONFIG_DISABLE_3000_SYMBOLS   0x08000000L
-#define PCINIT_FAXCONFIG_DISABLE_3200_SYMBOLS   0x10000000L
-#define PCINIT_FAXCONFIG_DISABLE_3429_SYMBOLS   0x20000000L
-/*--------------------------------------------------------------------------*/
-#define PCINIT_XDI_CMA_FOR_ALL_NL_PRIMITIVES    0x01
-/*--------------------------------------------------------------------------*/
-#define PCINIT_FPGA_PLX_ACCESS_SUPPORTED        0x01
-/*--------------------------------------------------------------------------*/
-#endif
-/*--------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/pc_maint.h b/drivers/isdn/hardware/eicon/pc_maint.h
deleted file mode 100644
index 496f018..0000000
--- a/drivers/isdn/hardware/eicon/pc_maint.h
+++ /dev/null
@@ -1,160 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifdef PLATFORM_GT_32BIT
-/* #define POINTER_32BIT byte * __ptr32 */
-#define POINTER_32BIT dword
-#else
-#define POINTER_32BIT byte *
-#endif
-#if !defined(MIPS_SCOM)
-#define BUFFER_SZ  48
-#define MAINT_OFFS 0x380
-#else
-#define BUFFER_SZ  128
-#if defined(PRI)
-#define MAINT_OFFS 0xef00
-#else
-#define MAINT_OFFS 0xff00
-#endif
-#endif
-#define MIPS_BUFFER_SZ  128
-#if defined(PRI)
-#define MIPS_MAINT_OFFS 0xef00
-#else
-#define MIPS_MAINT_OFFS 0xff00
-#endif
-#define LOG                     1
-#define MEMR                    2
-#define MEMW                    3
-#define IOR                     4
-#define IOW                     5
-#define B1TEST                  6
-#define B2TEST                  7
-#define BTESTOFF                8
-#define DSIG_STATS              9
-#define B_CH_STATS              10
-#define D_CH_STATS              11
-#define BL1_STATS               12
-#define BL1_STATS_C             13
-#define GET_VERSION             14
-#define OS_STATS                15
-#define XLOG_SET_MASK           16
-#define XLOG_GET_MASK           17
-#define DSP_READ                20
-#define DSP_WRITE               21
-#define OK 0xff
-#define MORE_EVENTS 0xfe
-#define NO_EVENT 1
-struct DSigStruc
-{
-	byte Id;
-	byte u;
-	byte listen;
-	byte active;
-	byte sin[3];
-	byte bc[6];
-	byte llc[6];
-	byte hlc[6];
-	byte oad[20];
-};
-struct BL1Struc {
-	dword cx_b1;
-	dword cx_b2;
-	dword cr_b1;
-	dword cr_b2;
-	dword px_b1;
-	dword px_b2;
-	dword pr_b1;
-	dword pr_b2;
-	word er_b1;
-	word er_b2;
-};
-struct L2Struc {
-	dword XTotal;
-	dword RTotal;
-	word XError;
-	word RError;
-};
-struct OSStruc {
-	dword free_n;
-};
-typedef union
-{
-	struct DSigStruc DSigStats;
-	struct BL1Struc BL1Stats;
-	struct L2Struc L2Stats;
-	struct OSStruc OSStats;
-	byte   b[BUFFER_SZ];
-	word   w[BUFFER_SZ >> 1];
-	word   l[BUFFER_SZ >> 2]; /* word is wrong, do not use! Use 'd' instead. */
-	dword  d[BUFFER_SZ >> 2];
-} BUFFER;
-typedef union
-{
-	struct DSigStruc DSigStats;
-	struct BL1Struc BL1Stats;
-	struct L2Struc L2Stats;
-	struct OSStruc OSStats;
-	byte   b[MIPS_BUFFER_SZ];
-	word   w[MIPS_BUFFER_SZ >> 1];
-	word   l[BUFFER_SZ >> 2]; /* word is wrong, do not use! Use 'd' instead. */
-	dword  d[MIPS_BUFFER_SZ >> 2];
-} MIPS_BUFFER;
-#if !defined(MIPS_SCOM)
-struct pc_maint
-{
-	byte req;
-	byte rc;
-	POINTER_32BIT mem;
-	short length;
-	word port;
-	byte fill[6];
-	BUFFER data;
-};
-#else
-struct pc_maint
-{
-	byte req;
-	byte rc;
-	byte reserved[2];     /* R3000 alignment ... */
-	POINTER_32BIT mem;
-	short length;
-	word port;
-	byte fill[4];         /* data at offset 16   */
-	BUFFER data;
-};
-#endif
-struct mi_pc_maint
-{
-	byte req;
-	byte rc;
-	byte reserved[2];     /* R3000 alignment ... */
-	POINTER_32BIT mem;
-	short length;
-	word port;
-	byte fill[4];         /* data at offset 16   */
-	MIPS_BUFFER data;
-};
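As an aside on the BUFFER union removed above: its in-code warning ("word is wrong, do not use! Use 'd' instead.") follows from simple size arithmetic, which a couple of static assertions make explicit. This is an illustrative user-space sketch, not part of the driver:

#include <stdint.h>

#define BUFFER_SZ 48            /* non-MIPS value used in pc_maint.h above */

typedef uint16_t word;
typedef uint32_t dword;

/* 'word l[BUFFER_SZ >> 2]' spans only half of the 48-byte buffer, while
 * 'dword d[BUFFER_SZ >> 2]' spans all of it, hence the advice to use 'd'. */
_Static_assert(sizeof(word[BUFFER_SZ >> 2]) == BUFFER_SZ / 2,
	       "the 'l' member covers only half the buffer");
_Static_assert(sizeof(dword[BUFFER_SZ >> 2]) == BUFFER_SZ,
	       "the 'd' member covers the whole buffer");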
diff --git a/drivers/isdn/hardware/eicon/pkmaint.h b/drivers/isdn/hardware/eicon/pkmaint.h
deleted file mode 100644
index cf3fb14..0000000
--- a/drivers/isdn/hardware/eicon/pkmaint.h
+++ /dev/null
@@ -1,43 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_OS_DEPENDENT_PACK_MAIN_ON_BYTE_INC__
-#define __DIVA_XDI_OS_DEPENDENT_PACK_MAIN_ON_BYTE_INC__
-
-
-/*
-  The only purpose of this compiler-dependent file is to pack the
-  structures described in pc_maint.h so that no padding is included.
-
-  With the Microsoft compiler this is done by "pshpack1.h" and later
-  restored by "poppack.h".
-*/
-
-
-#include "pc_maint.h"
-
-
-#endif
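For readers used to GCC or Clang: the packing that pkmaint.h describes in terms of Microsoft's "pshpack1.h"/"poppack.h" is normally expressed with #pragma pack (or __attribute__((packed))). A minimal, driver-independent sketch:

/* The push/pop pragma pair is the direct counterpart of including
 * "pshpack1.h" before the structures and "poppack.h" afterwards. */
#pragma pack(push, 1)
struct packed_example {
	unsigned char  req;    /* offset 0 */
	unsigned short port;   /* offset 1: no padding byte inserted */
};
#pragma pack(pop)

_Static_assert(sizeof(struct packed_example) == 3,
	       "1-byte packing removes the alignment padding");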
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
deleted file mode 100644
index 62e2073..0000000
--- a/drivers/isdn/hardware/eicon/platform.h
+++ /dev/null
@@ -1,369 +0,0 @@
-/* $Id: platform.h,v 1.37.4.6 2005/01/31 12:22:20 armin Exp $
- *
- * platform.h
- *
- *
- * Copyright 2000-2003  by Armin Schindler (mac@melware.de)
- * Copyright 2000  Eicon Networks
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-
-#ifndef	__PLATFORM_H__
-#define	__PLATFORM_H__
-
-#if !defined(DIVA_BUILD)
-#define DIVA_BUILD "local"
-#endif
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <asm/types.h>
-#include <asm/io.h>
-
-#include "cardtype.h"
-
-/* activate debuglib for modules only */
-#ifndef MODULE
-#define DIVA_NO_DEBUGLIB
-#endif
-
-#define DIVA_USER_MODE_CARD_CONFIG 1
-#define	USE_EXTENDED_DEBUGS 1
-
-#define MAX_ADAPTER     32
-
-#define DIVA_ISTREAM 1
-
-#define MEMORY_SPACE_TYPE  0
-#define PORT_SPACE_TYPE    1
-
-
-#include <linux/string.h>
-
-#ifndef	byte
-#define	byte   u8
-#endif
-
-#ifndef	word
-#define	word   u16
-#endif
-
-#ifndef	dword
-#define	dword  u32
-#endif
-
-#ifndef	qword
-#define	qword  u64
-#endif
-
-#ifndef	NULL
-#define	NULL	((void *) 0)
-#endif
-
-#ifndef	far
-#define far
-#endif
-
-#ifndef	_pascal
-#define _pascal
-#endif
-
-#ifndef	_loadds
-#define _loadds
-#endif
-
-#ifndef	_cdecl
-#define _cdecl
-#endif
-
-#define MEM_TYPE_RAM		0
-#define MEM_TYPE_PORT		1
-#define MEM_TYPE_PROM		2
-#define MEM_TYPE_CTLREG		3
-#define MEM_TYPE_RESET		4
-#define MEM_TYPE_CFG		5
-#define MEM_TYPE_ADDRESS	6
-#define MEM_TYPE_CONFIG		7
-#define MEM_TYPE_CONTROL	8
-
-#define MAX_MEM_TYPE		10
-
-#define DIVA_OS_MEM_ATTACH_RAM(a)	((a)->ram)
-#define DIVA_OS_MEM_ATTACH_PORT(a)	((a)->port)
-#define DIVA_OS_MEM_ATTACH_PROM(a)	((a)->prom)
-#define DIVA_OS_MEM_ATTACH_CTLREG(a)	((a)->ctlReg)
-#define DIVA_OS_MEM_ATTACH_RESET(a)	((a)->reset)
-#define DIVA_OS_MEM_ATTACH_CFG(a)	((a)->cfg)
-#define DIVA_OS_MEM_ATTACH_ADDRESS(a)	((a)->Address)
-#define DIVA_OS_MEM_ATTACH_CONFIG(a)	((a)->Config)
-#define DIVA_OS_MEM_ATTACH_CONTROL(a)	((a)->Control)
-
-#define DIVA_OS_MEM_DETACH_RAM(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_PORT(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_PROM(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_CTLREG(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_RESET(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_CFG(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_ADDRESS(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_CONFIG(a, x)	do { } while (0)
-#define DIVA_OS_MEM_DETACH_CONTROL(a, x)	do { } while (0)
-
-#define DIVA_INVALID_FILE_HANDLE  ((dword)(-1))
-
-#define DIVAS_CONTAINING_RECORD(address, type, field)			\
-	((type *)((char *)(address) - (char *)(&((type *)0)->field)))
-
-extern int sprintf(char *, const char *, ...);
-
-typedef void *LIST_ENTRY;
-
-typedef char DEVICE_NAME[64];
-typedef struct _ISDN_ADAPTER ISDN_ADAPTER;
-typedef struct _ISDN_ADAPTER *PISDN_ADAPTER;
-
-typedef void (*DIVA_DI_PRINTF)(unsigned char *, ...);
-#include "debuglib.h"
-
-#define dtrc(p) DBG_PRV0(p)
-#define dbug(a, p) DBG_PRV1(p)
-
-
-typedef struct e_info_s E_INFO;
-
-typedef char diva_os_dependent_devica_name_t[64];
-typedef void *PDEVICE_OBJECT;
-
-struct _diva_os_soft_isr;
-struct _diva_os_timer;
-struct _ISDN_ADAPTER;
-
-void diva_log_info(unsigned char *, ...);
-
-/*
-**  XDI DIDD Interface
-*/
-void diva_xdi_didd_register_adapter(int card);
-void diva_xdi_didd_remove_adapter(int card);
-
-/*
-** memory allocation
-*/
-static __inline__ void *diva_os_malloc(unsigned long flags, unsigned long size)
-{
-	void *ret = NULL;
-
-	if (size) {
-		ret = (void *) vmalloc((unsigned int) size);
-	}
-	return (ret);
-}
-static __inline__ void diva_os_free(unsigned long flags, void *ptr)
-{
-	vfree(ptr);
-}
-
-/*
-** use skbuffs for message buffer
-*/
-typedef struct sk_buff diva_os_message_buffer_s;
-diva_os_message_buffer_s *diva_os_alloc_message_buffer(unsigned long size, void **data_buf);
-void diva_os_free_message_buffer(diva_os_message_buffer_s *dmb);
-#define DIVA_MESSAGE_BUFFER_LEN(x) x->len
-#define DIVA_MESSAGE_BUFFER_DATA(x) x->data
-
-/*
-** mSeconds waiting
-*/
-static __inline__ void diva_os_sleep(dword mSec)
-{
-	msleep(mSec);
-}
-static __inline__ void diva_os_wait(dword mSec)
-{
-	mdelay(mSec);
-}
-
-/*
-**  PCI Configuration space access
-*/
-void PCIwrite(byte bus, byte func, int offset, void *data, int length, void *pci_dev_handle);
-void PCIread(byte bus, byte func, int offset, void *data, int length, void *pci_dev_handle);
-
-/*
-**  I/O Port utilities
-*/
-int diva_os_register_io_port(void *adapter, int reg, unsigned long port,
-			     unsigned long length, const char *name, int id);
-/*
-**  I/O port access abstraction
-*/
-byte inpp(void __iomem *);
-word inppw(void __iomem *);
-void inppw_buffer(void __iomem *, void *, int);
-void outppw(void __iomem *, word);
-void outppw_buffer(void __iomem * , void*, int);
-void outpp(void __iomem *, word);
-
-/*
-**  IRQ
-*/
-typedef struct _diva_os_adapter_irq_info {
-	byte irq_nr;
-	int  registered;
-	char irq_name[24];
-} diva_os_adapter_irq_info_t;
-int diva_os_register_irq(void *context, byte irq, const char *name);
-void diva_os_remove_irq(void *context, byte irq);
-
-#define diva_os_in_irq() in_irq()
-
-/*
-**  Spin Lock framework
-*/
-typedef long diva_os_spin_lock_magic_t;
-typedef spinlock_t diva_os_spin_lock_t;
-static __inline__ int diva_os_initialize_spin_lock(spinlock_t *lock, void *unused) { \
-	spin_lock_init(lock); return (0); }
-static __inline__ void diva_os_enter_spin_lock(diva_os_spin_lock_t *a, \
-					       diva_os_spin_lock_magic_t *old_irql, \
-					       void *dbg) { spin_lock_bh(a); }
-static __inline__ void diva_os_leave_spin_lock(diva_os_spin_lock_t *a, \
-					       diva_os_spin_lock_magic_t *old_irql, \
-					       void *dbg) { spin_unlock_bh(a); }
-
-#define diva_os_destroy_spin_lock(a, b) do { } while (0)
-
-/*
-**  Deferred processing framework
-*/
-typedef int (*diva_os_isr_callback_t)(struct _ISDN_ADAPTER *);
-typedef void (*diva_os_soft_isr_callback_t)(struct _diva_os_soft_isr *psoft_isr, void *context);
-
-typedef struct _diva_os_soft_isr {
-	void *object;
-	diva_os_soft_isr_callback_t callback;
-	void *callback_context;
-	char dpc_thread_name[24];
-} diva_os_soft_isr_t;
-
-int diva_os_initialize_soft_isr(diva_os_soft_isr_t *psoft_isr, diva_os_soft_isr_callback_t callback, void *callback_context);
-int diva_os_schedule_soft_isr(diva_os_soft_isr_t *psoft_isr);
-int diva_os_cancel_soft_isr(diva_os_soft_isr_t *psoft_isr);
-void diva_os_remove_soft_isr(diva_os_soft_isr_t *psoft_isr);
-
-/*
-  Get time service
-*/
-void diva_os_get_time(dword *sec, dword *usec);
-
-/*
-**  atomic operation, fake because we use threads
-*/
-typedef int diva_os_atomic_t;
-static inline diva_os_atomic_t
-diva_os_atomic_increment(diva_os_atomic_t *pv)
-{
-	*pv += 1;
-	return (*pv);
-}
-static inline diva_os_atomic_t
-diva_os_atomic_decrement(diva_os_atomic_t *pv)
-{
-	*pv -= 1;
-	return (*pv);
-}
-
-/*
-**  CAPI SECTION
-*/
-#define NO_CORNETN
-#define IMPLEMENT_DTMF 1
-#define IMPLEMENT_ECHO_CANCELLER 1
-#define IMPLEMENT_RTP 1
-#define IMPLEMENT_T38 1
-#define IMPLEMENT_FAX_SUB_SEP_PWD 1
-#define IMPLEMENT_V18 1
-#define IMPLEMENT_DTMF_TONE 1
-#define IMPLEMENT_PIAFS 1
-#define IMPLEMENT_FAX_PAPER_FORMATS 1
-#define IMPLEMENT_VOWN 1
-#define IMPLEMENT_CAPIDTMF 1
-#define IMPLEMENT_FAX_NONSTANDARD 1
-#define VSWITCH_SUPPORT 1
-
-#define IMPLEMENT_MARKED_OK_AFTER_FC 1
-
-#define DIVA_IDI_RX_DMA 1
-
-/*
-** endian macros
-**
-** If only...  In some cases we did use them for endianness conversion;
-** unfortunately, other uses were real iomem accesses.
-*/
-#define READ_BYTE(addr)   readb(addr)
-#define READ_WORD(addr)   readw(addr)
-#define READ_DWORD(addr)  readl(addr)
-
-#define WRITE_BYTE(addr, v)  writeb(v, addr)
-#define WRITE_WORD(addr, v)  writew(v, addr)
-#define WRITE_DWORD(addr, v) writel(v, addr)
-
-static inline __u16 GET_WORD(void *addr)
-{
-	return le16_to_cpu(*(__le16 *)addr);
-}
-static inline __u32 GET_DWORD(void *addr)
-{
-	return le32_to_cpu(*(__le32 *)addr);
-}
-static inline void PUT_WORD(void *addr, __u16 v)
-{
-	*(__le16 *)addr = cpu_to_le16(v);
-}
-static inline void PUT_DWORD(void *addr, __u32 v)
-{
-	*(__le32 *)addr = cpu_to_le32(v);
-}
-
-/*
-** 32/64 bit macros
-*/
-#ifdef BITS_PER_LONG
-#if BITS_PER_LONG > 32
-#define PLATFORM_GT_32BIT
-#define ULongToPtr(x) (void *)(unsigned long)(x)
-#endif
-#endif
-
-/*
-** undef os definitions of macros we use
-*/
-#undef ID_MASK
-#undef N_DATA
-#undef ADDR
-
-/*
-** dump file
-*/
-#define diva_os_dump_file_t char
-#define diva_os_board_trace_t char
-#define diva_os_dump_file(__x__) do { } while (0)
-
-/*
-** size of internal arrays
-*/
-#define MAX_DESCRIPTORS 64
-
-#endif	/* __PLATFORM_H__ */
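The GET_WORD()/PUT_WORD() helpers in the header above rely on casting the address and on le16_to_cpu()/cpu_to_le16(). Outside the kernel the same little-endian access is often written with memcpy so that unaligned addresses are handled safely as well; a small user-space sketch with illustrative names:

#include <stdint.h>
#include <string.h>

/* Read/write a 16-bit little-endian value at an arbitrary address,
 * mirroring what GET_WORD()/PUT_WORD() did in the removed header. */
static inline uint16_t get_le16(const void *addr)
{
	uint8_t b[2];

	memcpy(b, addr, sizeof(b));
	return (uint16_t)(b[0] | (b[1] << 8));
}

static inline void put_le16(void *addr, uint16_t v)
{
	uint8_t b[2] = { (uint8_t)v, (uint8_t)(v >> 8) };

	memcpy(addr, b, sizeof(b));
}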
diff --git a/drivers/isdn/hardware/eicon/pr_pc.h b/drivers/isdn/hardware/eicon/pr_pc.h
deleted file mode 100644
index a08d6d5..0000000
--- a/drivers/isdn/hardware/eicon/pr_pc.h
+++ /dev/null
@@ -1,76 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-struct pr_ram {
-	word NextReq;         /* pointer to next Req Buffer               */
-	word NextRc;          /* pointer to next Rc Buffer                */
-	word NextInd;         /* pointer to next Ind Buffer               */
-	byte ReqInput;        /* number of Req Buffers sent               */
-	byte ReqOutput;       /* number of Req Buffers returned           */
-	byte ReqReserved;     /* number of Req Buffers reserved           */
-	byte Int;             /* ISDN-P interrupt                         */
-	byte XLock;           /* Lock field for arbitration               */
-	byte RcOutput;        /* number of Rc buffers received            */
-	byte IndOutput;       /* number of Ind buffers received           */
-	byte IMask;           /* Interrupt Mask Flag                      */
-	byte Reserved1[2];    /* reserved field, do not use               */
-	byte ReadyInt;        /* request field for ready interrupt        */
-	byte Reserved2[12];   /* reserved field, do not use               */
-	byte InterfaceType;   /* interface type 1=16K interface           */
-	word Signature;       /* ISDN-P initialized indication            */
-	byte B[1];            /* buffer space for Req,Ind and Rc          */
-};
-typedef struct {
-	word next;
-	byte Req;
-	byte ReqId;
-	byte ReqCh;
-	byte Reserved1;
-	word Reference;
-	byte Reserved[8];
-	PBUFFER XBuffer;
-} REQ;
-typedef struct {
-	word next;
-	byte Rc;
-	byte RcId;
-	byte RcCh;
-	byte Reserved1;
-	word Reference;
-	byte Reserved2[8];
-} RC;
-typedef struct {
-	word next;
-	byte Ind;
-	byte IndId;
-	byte IndCh;
-	byte MInd;
-	word MLength;
-	word Reference;
-	byte RNR;
-	byte Reserved;
-	dword Ack;
-	PBUFFER RBuffer;
-} IND;
diff --git a/drivers/isdn/hardware/eicon/s_4bri.c b/drivers/isdn/hardware/eicon/s_4bri.c
deleted file mode 100644
index ec12165..0000000
--- a/drivers/isdn/hardware/eicon/s_4bri.c
+++ /dev/null
@@ -1,510 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "pc_init.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv4bri.h"
-#include "dsp_defs.h"
-#include "sdp_hdr.h"
-
-/*****************************************************************************/
-#define	MAX_XLOG_SIZE	(64 * 1024)
-
-/* --------------------------------------------------------------------------
-   Recover the XLOG from the QBRI card
-   -------------------------------------------------------------------------- */
-static void qBri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
-	byte  __iomem *base;
-	word *Xlog;
-	dword   regs[4], TrapID, offset, size;
-	Xdesc   xlogDesc;
-	int factor = (IoAdapter->tasks == 1) ? 1 : 2;
-
-/*
- *	check for trapped MIPS 46xx CPU, dump exception frame
- */
-
-	base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter);
-	offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor);
-
-	TrapID = READ_DWORD(&base[0x80]);
-
-	if ((TrapID == 0x99999999) || (TrapID == 0x99999901))
-	{
-		dump_trap_frame(IoAdapter, &base[0x90]);
-		IoAdapter->trapped = 1;
-	}
-
-	regs[0] = READ_DWORD((base + offset) + 0x70);
-	regs[1] = READ_DWORD((base + offset) + 0x74);
-	regs[2] = READ_DWORD((base + offset) + 0x78);
-	regs[3] = READ_DWORD((base + offset) + 0x7c);
-	regs[0] &= IoAdapter->MemorySize - 1;
-
-	if ((regs[0] >= offset)
-	    && (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1))
-	{
-		if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) {
-			DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
-			return;
-		}
-
-		size = offset + (IoAdapter->MemorySize >> factor) - regs[0];
-		if (size > MAX_XLOG_SIZE)
-			size = MAX_XLOG_SIZE;
-		memcpy_fromio(Xlog, &base[regs[0]], size);
-		xlogDesc.buf = Xlog;
-		xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]);
-		xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]);
-		dump_xlog_buffer(IoAdapter, &xlogDesc);
-		diva_os_free(0, Xlog);
-		IoAdapter->trapped = 2;
-	}
-	DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
-}
-
-/* --------------------------------------------------------------------------
-   Reset QBRI Hardware
-   -------------------------------------------------------------------------- */
-static void reset_qBri_hardware(PISDN_ADAPTER IoAdapter) {
-	word volatile __iomem *qBriReset;
-	byte  volatile __iomem *qBriCntrl;
-	byte  volatile __iomem *p;
-
-	qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
-	WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET);
-	diva_os_wait(1);
-	WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET);
-	diva_os_wait(1);
-	WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM);
-	diva_os_wait(1);
-	WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM);
-	diva_os_wait(1);
-	DIVA_OS_MEM_DETACH_PROM(IoAdapter, qBriReset);
-
-	qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
-	WRITE_DWORD(p, 0);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl);
-
-	DBG_TRC(("reset board @ reset addr 0x%08lx", qBriReset))
-		DBG_TRC(("reset board @ cntrl addr 0x%08lx", p))
-		}
-
-/* --------------------------------------------------------------------------
-   Start Card CPU
-   -------------------------------------------------------------------------- */
-void start_qBri_hardware(PISDN_ADAPTER IoAdapter) {
-	byte volatile __iomem *qBriReset;
-	byte volatile __iomem *p;
-
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
-	WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK);
-	diva_os_wait(2);
-	WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK);
-	diva_os_wait(10);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
-	DBG_TRC(("started processor @ addr 0x%08lx", qBriReset))
-		}
-
-/* --------------------------------------------------------------------------
-   Stop Card CPU
-   -------------------------------------------------------------------------- */
-static void stop_qBri_hardware(PISDN_ADAPTER IoAdapter) {
-	byte volatile __iomem *p;
-	dword volatile __iomem *qBriReset;
-	dword volatile __iomem *qBriIrq;
-	dword volatile __iomem *qBriIsacDspReset;
-	int rev2 = DIVA_4BRI_REVISION(IoAdapter);
-	int reset_offset = rev2 ? (MQ2_BREG_RISC)      : (MQ_BREG_RISC);
-	int irq_offset   = rev2 ? (MQ2_BREG_IRQ_TEST)  : (MQ_BREG_IRQ_TEST);
-	int hw_offset    = rev2 ? (MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET);
-
-	if (IoAdapter->ControllerNumber > 0)
-		return;
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriReset = (dword volatile __iomem *)&p[reset_offset];
-	qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset];
-/*
- *	clear interrupt line (reset Local Interrupt Test Register)
- */
-	WRITE_DWORD(qBriReset, 0);
-	WRITE_DWORD(qBriIsacDspReset, 0);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriIrq   = (dword volatile __iomem *)&p[irq_offset];
-	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
-	DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset))
-
-		}
-
-/* --------------------------------------------------------------------------
-   FPGA download
-   -------------------------------------------------------------------------- */
-#define FPGA_NAME_OFFSET         0x10
-
-static byte *qBri_check_FPGAsrc(PISDN_ADAPTER IoAdapter, char *FileName,
-				dword *Length, dword *code) {
-	byte *File;
-	char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime;
-	dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i;
-
-	if (!(File = (byte *)xdiLoadFile(FileName, Length, 0))) {
-		return (NULL);
-	}
-/*
- *	 scan file until FF and put id string into buffer
- */
-	for (i = 0; File[i] != 0xff;)
-	{
-		if (++i >= *Length)
-		{
-			DBG_FTL(("FPGA download: start of data header not found"))
-				xdiFreeFile(File);
-			return (NULL);
-		}
-	}
-	*code = i++;
-
-	if ((File[i] & 0xF0) != 0x20)
-	{
-		DBG_FTL(("FPGA download: data header corrupted"))
-			xdiFreeFile(File);
-		return (NULL);
-	}
-	fpgaFlen = (dword)File[FPGA_NAME_OFFSET - 1];
-	if (fpgaFlen == 0)
-		fpgaFlen = 12;
-	fpgaFile = (char *)&File[FPGA_NAME_OFFSET];
-	fpgaTlen = (dword)fpgaFile[fpgaFlen + 2];
-	if (fpgaTlen == 0)
-		fpgaTlen = 10;
-	fpgaType = (char *)&fpgaFile[fpgaFlen + 3];
-	fpgaDlen = (dword)  fpgaType[fpgaTlen + 2];
-	if (fpgaDlen == 0)
-		fpgaDlen = 11;
-	fpgaDate = (char *)&fpgaType[fpgaTlen + 3];
-	fpgaTime = (char *)&fpgaDate[fpgaDlen + 3];
-	cnt = (dword)(((File[i] & 0x0F) << 20) + (File[i + 1] << 12)
-		      + (File[i + 2] << 4) + (File[i + 3] >> 4));
-
-	if ((dword)(i + (cnt / 8)) > *Length)
-	{
-		DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)",
-			 FileName, *Length, code + ((cnt + 7) / 8)))
-			xdiFreeFile(File);
-		return (NULL);
-	}
-	i = 0;
-	do
-	{
-		while ((fpgaDate[i] != '\0')
-		       && ((fpgaDate[i] < '0') || (fpgaDate[i] > '9')))
-		{
-			i++;
-		}
-		year = 0;
-		while ((fpgaDate[i] >= '0') && (fpgaDate[i] <= '9'))
-			year = year * 10 + (fpgaDate[i++] - '0');
-	} while ((year < 2000) && (fpgaDate[i] != '\0'));
-
-	switch (IoAdapter->cardType) {
-	case CARDTYPE_DIVASRV_B_2F_PCI:
-		break;
-
-	default:
-		if (year >= 2001) {
-			IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED;
-		}
-	}
-
-	DBG_LOG(("FPGA[%s] file %s (%s %s) len %d",
-		 fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt))
-		return (File);
-}
-
-/******************************************************************************/
-
-#define FPGA_PROG   0x0001		/* PROG enable low */
-#define FPGA_BUSY   0x0002		/* BUSY high, DONE low */
-#define	FPGA_CS     0x000C		/* Enable I/O pins */
-#define FPGA_CCLK   0x0100
-#define FPGA_DOUT   0x0400
-#define FPGA_DIN    FPGA_DOUT   /* bidirectional I/O */
-
-int qBri_FPGA_download(PISDN_ADAPTER IoAdapter) {
-	int            bit;
-	byte           *File;
-	dword          code, FileLength;
-	word volatile __iomem *addr = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
-	word           val, baseval = FPGA_CS | FPGA_PROG;
-
-
-
-	if (DIVA_4BRI_REVISION(IoAdapter))
-	{
-		char *name;
-
-		switch (IoAdapter->cardType) {
-		case CARDTYPE_DIVASRV_B_2F_PCI:
-			name = "dsbri2f.bit";
-			break;
-
-		case CARDTYPE_DIVASRV_B_2M_V2_PCI:
-		case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
-			name = "dsbri2m.bit";
-			break;
-
-		default:
-			name = "ds4bri2.bit";
-		}
-
-		File = qBri_check_FPGAsrc(IoAdapter, name,
-					  &FileLength, &code);
-	}
-	else
-	{
-		File = qBri_check_FPGAsrc(IoAdapter, "ds4bri.bit",
-					  &FileLength, &code);
-	}
-	if (!File) {
-		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
-		return (0);
-	}
-/*
- *	prepare download, pulse PROGRAM pin down.
- */
-	WRITE_WORD(addr, baseval & ~FPGA_PROG); /* PROGRAM low pulse */
-	WRITE_WORD(addr, baseval);              /* release */
-	diva_os_wait(50);  /* wait until FPGA finished internal memory clear */
-/*
- *	check done pin, must be low
- */
-	if (READ_WORD(addr) & FPGA_BUSY)
-	{
-		DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing"))
-			xdiFreeFile(File);
-		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
-		return (0);
-	}
-/*
- *	put data onto the FPGA
- */
-	while (code < FileLength)
-	{
-		val = ((word)File[code++]) << 3;
-
-		for (bit = 8; bit-- > 0; val <<= 1) /* put byte onto FPGA */
-		{
-			baseval &= ~FPGA_DOUT;             /* clr  data bit */
-			baseval |= (val & FPGA_DOUT);      /* copy data bit */
-			WRITE_WORD(addr, baseval);
-			WRITE_WORD(addr, baseval | FPGA_CCLK);     /* set CCLK hi */
-			WRITE_WORD(addr, baseval | FPGA_CCLK);     /* set CCLK hi */
-			WRITE_WORD(addr, baseval);                 /* set CCLK lo */
-		}
-	}
-	xdiFreeFile(File);
-	diva_os_wait(100);
-	val = READ_WORD(addr);
-
-	DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
-
-	if (!(val & FPGA_BUSY))
-	{
-		DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val))
-			return (0);
-	}
-
-	return (1);
-}
-
-static int load_qBri_hardware(PISDN_ADAPTER IoAdapter) {
-	return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Card ISR
-   -------------------------------------------------------------------------- */
-static int qBri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
-	dword volatile     __iomem *qBriIrq;
-
-	PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList;
-
-	word			i;
-	int			serviced = 0;
-	byte __iomem *p;
-
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-
-	if (!(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80)) {
-		DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-		return (0);
-	}
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
-/*
- *	clear interrupt line (reset Local Interrupt Test Register)
- */
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST)  : (MQ_BREG_IRQ_TEST)]);
-	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
-	for (i = 0; i < IoAdapter->tasks; ++i)
-	{
-		IoAdapter = QuadroList->QuadroAdapter[i];
-
-		if (IoAdapter && IoAdapter->Initialized
-		    && IoAdapter->tst_irq(&IoAdapter->a))
-		{
-			IoAdapter->IrqCount++;
-			serviced = 1;
-			diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
-		}
-	}
-
-	return (serviced);
-}
-
-/* --------------------------------------------------------------------------
-   Disable the interrupt on the card
-   -------------------------------------------------------------------------- */
-static void disable_qBri_interrupt(PISDN_ADAPTER IoAdapter) {
-	dword volatile __iomem *qBriIrq;
-	byte __iomem *p;
-
-	if (IoAdapter->ControllerNumber > 0)
-		return;
-/*
- *	clear interrupt line (reset Local Interrupt Test Register)
- */
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST)  : (MQ_BREG_IRQ_TEST)]);
-	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-
-/* --------------------------------------------------------------------------
-   Install Adapter Entry Points
-   -------------------------------------------------------------------------- */
-static void set_common_qBri_functions(PISDN_ADAPTER IoAdapter) {
-	ADAPTER *a;
-
-	a = &IoAdapter->a;
-
-	a->ram_in           = mem_in;
-	a->ram_inw          = mem_inw;
-	a->ram_in_buffer    = mem_in_buffer;
-	a->ram_look_ahead   = mem_look_ahead;
-	a->ram_out          = mem_out;
-	a->ram_outw         = mem_outw;
-	a->ram_out_buffer   = mem_out_buffer;
-	a->ram_inc          = mem_inc;
-
-	IoAdapter->out = pr_out;
-	IoAdapter->dpc = pr_dpc;
-	IoAdapter->tst_irq = scom_test_int;
-	IoAdapter->clr_irq  = scom_clear_int;
-	IoAdapter->pcm  = (struct pc_maint *)MIPS_MAINT_OFFS;
-
-	IoAdapter->load = load_qBri_hardware;
-
-	IoAdapter->disIrq = disable_qBri_interrupt;
-	IoAdapter->rstFnc = reset_qBri_hardware;
-	IoAdapter->stop = stop_qBri_hardware;
-	IoAdapter->trapFnc = qBri_cpu_trapped;
-
-	IoAdapter->diva_isr_handler = qBri_ISR;
-
-	IoAdapter->a.io = (void *)IoAdapter;
-}
-
-static void set_qBri_functions(PISDN_ADAPTER IoAdapter) {
-	if (!IoAdapter->tasks) {
-		IoAdapter->tasks = MQ_INSTANCE_COUNT;
-	}
-	IoAdapter->MemorySize = MQ_MEMORY_SIZE;
-	set_common_qBri_functions(IoAdapter);
-	diva_os_set_qBri_functions(IoAdapter);
-}
-
-static void set_qBri2_functions(PISDN_ADAPTER IoAdapter) {
-	if (!IoAdapter->tasks) {
-		IoAdapter->tasks = MQ_INSTANCE_COUNT;
-	}
-	IoAdapter->MemorySize = (IoAdapter->tasks == 1) ? BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE;
-	set_common_qBri_functions(IoAdapter);
-	diva_os_set_qBri2_functions(IoAdapter);
-}
-
-/******************************************************************************/
-
-void prepare_qBri_functions(PISDN_ADAPTER IoAdapter) {
-
-	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
-	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
-	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
-	set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
-
-}
-
-void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter) {
-	if (!IoAdapter->tasks) {
-		IoAdapter->tasks = MQ_INSTANCE_COUNT;
-	}
-
-	set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
-	if (IoAdapter->tasks > 1) {
-		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
-		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
-		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
-	}
-
-}
-
-/* -------------------------------------------------------------------------- */
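The byte loop inside qBri_FPGA_download() above is a standard MSB-first bit-bang: each data bit is placed on FPGA_DOUT while FPGA_CCLK is low, then the clock is pulsed. A compact model of that pattern, with a hypothetical write_reg() callback standing in for the WRITE_WORD() register access:

#include <stdint.h>

#define FPGA_PROG  0x0001
#define FPGA_CS    0x000C
#define FPGA_CCLK  0x0100
#define FPGA_DOUT  0x0400

/* write_reg() is a placeholder for the real register write (WRITE_WORD()
 * on the attached PROM window in the removed code). */
static void shift_byte_msb_first(void (*write_reg)(uint16_t), uint8_t data)
{
	uint16_t baseval = FPGA_CS | FPGA_PROG;
	uint16_t val = (uint16_t)data << 3;   /* bit 7 lands on FPGA_DOUT */
	int bit;

	for (bit = 8; bit-- > 0; val <<= 1) {
		baseval &= ~FPGA_DOUT;            /* clear the data bit   */
		baseval |= (val & FPGA_DOUT);     /* copy the current bit */
		write_reg(baseval);               /* data stable, CCLK low */
		write_reg(baseval | FPGA_CCLK);   /* CCLK high            */
		write_reg(baseval);               /* CCLK low again       */
	}
}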
diff --git a/drivers/isdn/hardware/eicon/s_bri.c b/drivers/isdn/hardware/eicon/s_bri.c
deleted file mode 100644
index 6a5bb74..0000000
--- a/drivers/isdn/hardware/eicon/s_bri.c
+++ /dev/null
@@ -1,191 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv_bri.h"
-#include "dsp_defs.h"
-/*****************************************************************************/
-#define MAX_XLOG_SIZE (64 * 1024)
-/* --------------------------------------------------------------------------
-   Investigate card state, recover the trace buffer
-   -------------------------------------------------------------------------- */
-static void bri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
-	byte  __iomem *addrHi, *addrLo, *ioaddr;
-	word *Xlog;
-	dword   regs[4], i, size;
-	Xdesc   xlogDesc;
-	byte __iomem *Port;
-/*
- * first read pointers and trap frame
- */
-	if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE)))
-		return;
-	Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
-	addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
-	addrLo = Port + ADDR;
-	ioaddr = Port + DATA;
-	outpp(addrHi,  0);
-	outppw(addrLo, 0);
-	for (i = 0; i < 0x100; Xlog[i++] = inppw(ioaddr));
-/*
- * check for trapped MIPS 3xxx CPU, dump only exception frame
- */
-	if (GET_DWORD(&Xlog[0x80 / sizeof(Xlog[0])]) == 0x99999999)
-	{
-		dump_trap_frame(IoAdapter, &((byte *)Xlog)[0x90]);
-		IoAdapter->trapped = 1;
-	}
-	regs[0] = GET_DWORD(&((byte *)Xlog)[0x70]);
-	regs[1] = GET_DWORD(&((byte *)Xlog)[0x74]);
-	regs[2] = GET_DWORD(&((byte *)Xlog)[0x78]);
-	regs[3] = GET_DWORD(&((byte *)Xlog)[0x7c]);
-	outpp(addrHi, (regs[1] >> 16) & 0x7F);
-	outppw(addrLo, regs[1] & 0xFFFF);
-	xlogDesc.cnt = inppw(ioaddr);
-	outpp(addrHi, (regs[2] >> 16) & 0x7F);
-	outppw(addrLo, regs[2] & 0xFFFF);
-	xlogDesc.out = inppw(ioaddr);
-	xlogDesc.buf = Xlog;
-	regs[0] &= IoAdapter->MemorySize - 1;
-	if ((regs[0] < IoAdapter->MemorySize - 1))
-	{
-		size = IoAdapter->MemorySize - regs[0];
-		if (size > MAX_XLOG_SIZE)
-			size = MAX_XLOG_SIZE;
-		for (i = 0; i < (size / sizeof(*Xlog)); regs[0] += 2)
-		{
-			outpp(addrHi, (regs[0] >> 16) & 0x7F);
-			outppw(addrLo, regs[0] & 0xFFFF);
-			Xlog[i++] = inppw(ioaddr);
-		}
-		dump_xlog_buffer(IoAdapter, &xlogDesc);
-		diva_os_free(0, Xlog);
-		IoAdapter->trapped = 2;
-	}
-	outpp(addrHi, (byte)((BRI_UNCACHED_ADDR(IoAdapter->MemoryBase + IoAdapter->MemorySize -
-						BRI_SHARED_RAM_SIZE)) >> 16));
-	outppw(addrLo, 0x00);
-	DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-}
-/* ---------------------------------------------------------------------
-   Reset hardware
-   --------------------------------------------------------------------- */
-static void reset_bri_hardware(PISDN_ADAPTER IoAdapter) {
-	byte __iomem *p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	outpp(p, 0x00);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-/* ---------------------------------------------------------------------
-   Halt system
-   --------------------------------------------------------------------- */
-static void stop_bri_hardware(PISDN_ADAPTER IoAdapter) {
-	byte __iomem *p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	if (p) {
-		outpp(p, 0x00); /* disable interrupts ! */
-	}
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	outpp(p, 0x00);    /* clear int, halt cpu */
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-static int load_bri_hardware(PISDN_ADAPTER IoAdapter) {
-	return (0);
-}
-/******************************************************************************/
-static int bri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
-	byte __iomem *p;
-
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	if (!(inpp(p) & 0x01)) {
-		DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-		return (0);
-	}
-	/*
-	  clear interrupt line
-	*/
-	outpp(p, 0x08);
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-	IoAdapter->IrqCount++;
-	if (IoAdapter->Initialized) {
-		diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
-	}
-	return (1);
-}
-/* --------------------------------------------------------------------------
-   Disable IRQ in the card hardware
-   -------------------------------------------------------------------------- */
-static void disable_bri_interrupt(PISDN_ADAPTER IoAdapter) {
-	byte __iomem *p;
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	if (p)
-	{
-		outpp(p, 0x00); /* disable interrupts ! */
-	}
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	outpp(p, 0x00); /* clear int, halt cpu */
-	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-/* -------------------------------------------------------------------------
-   Fill card entry points
-   ------------------------------------------------------------------------- */
-void prepare_maestra_functions(PISDN_ADAPTER IoAdapter) {
-	ADAPTER *a = &IoAdapter->a;
-	a->ram_in             = io_in;
-	a->ram_inw            = io_inw;
-	a->ram_in_buffer      = io_in_buffer;
-	a->ram_look_ahead     = io_look_ahead;
-	a->ram_out            = io_out;
-	a->ram_outw           = io_outw;
-	a->ram_out_buffer     = io_out_buffer;
-	a->ram_inc            = io_inc;
-	IoAdapter->MemoryBase = BRI_MEMORY_BASE;
-	IoAdapter->MemorySize = BRI_MEMORY_SIZE;
-	IoAdapter->out        = pr_out;
-	IoAdapter->dpc        = pr_dpc;
-	IoAdapter->tst_irq    = scom_test_int;
-	IoAdapter->clr_irq    = scom_clear_int;
-	IoAdapter->pcm        = (struct pc_maint *)MIPS_MAINT_OFFS;
-	IoAdapter->load       = load_bri_hardware;
-	IoAdapter->disIrq     = disable_bri_interrupt;
-	IoAdapter->rstFnc     = reset_bri_hardware;
-	IoAdapter->stop       = stop_bri_hardware;
-	IoAdapter->trapFnc    = bri_cpu_trapped;
-	IoAdapter->diva_isr_handler = bri_ISR;
-	/*
-	  Prepare OS dependent functions
-	*/
-	diva_os_prepare_maestra_functions(IoAdapter);
-}
-/* -------------------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/eicon/s_pri.c b/drivers/isdn/hardware/eicon/s_pri.c
deleted file mode 100644
index ddd0e0e..0000000
--- a/drivers/isdn/hardware/eicon/s_pri.c
+++ /dev/null
@@ -1,205 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv_pri.h"
-#include "dsp_defs.h"
-/*****************************************************************************/
-#define MAX_XLOG_SIZE  (64 * 1024)
-/* -------------------------------------------------------------------------
-   Return the offset between ADAPTER->ram and the real start of memory
-   ------------------------------------------------------------------------- */
-static dword pri_ram_offset(ADAPTER *a) {
-	return ((dword)MP_SHARED_RAM_OFFSET);
-}
-/* -------------------------------------------------------------------------
-   Recover the XLOG buffer from the card
-   ------------------------------------------------------------------------- */
-static void pri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
-	byte  __iomem *base;
-	word *Xlog;
-	dword   regs[4], TrapID, size;
-	Xdesc   xlogDesc;
-/*
- * check for trapped MIPS 46xx CPU, dump exception frame
- */
-	base   = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
-	TrapID = READ_DWORD(&base[0x80]);
-	if ((TrapID == 0x99999999) || (TrapID == 0x99999901))
-	{
-		dump_trap_frame(IoAdapter, &base[0x90]);
-		IoAdapter->trapped = 1;
-	}
-	regs[0] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x70]);
-	regs[1] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x74]);
-	regs[2] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x78]);
-	regs[3] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x7c]);
-	regs[0] &= IoAdapter->MemorySize - 1;
-	if ((regs[0] < IoAdapter->MemorySize - 1))
-	{
-		if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) {
-			DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
-			return;
-		}
-		size = IoAdapter->MemorySize - regs[0];
-		if (size > MAX_XLOG_SIZE)
-			size = MAX_XLOG_SIZE;
-		memcpy_fromio(Xlog, &base[regs[0]], size);
-		xlogDesc.buf = Xlog;
-		xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]);
-		xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]);
-		dump_xlog_buffer(IoAdapter, &xlogDesc);
-		diva_os_free(0, Xlog);
-		IoAdapter->trapped = 2;
-	}
-	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
-}
-/* -------------------------------------------------------------------------
-   Hardware reset of PRI card
-   ------------------------------------------------------------------------- */
-static void reset_pri_hardware(PISDN_ADAPTER IoAdapter) {
-	byte __iomem *p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
-	diva_os_wait(50);
-	WRITE_BYTE(p, 0x00);
-	diva_os_wait(50);
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-}
-/* -------------------------------------------------------------------------
-   Stop Card Hardware
-   ------------------------------------------------------------------------- */
-static void stop_pri_hardware(PISDN_ADAPTER IoAdapter) {
-	dword i;
-	byte __iomem *p;
-	dword volatile __iomem *cfgReg = (void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
-	WRITE_DWORD(&cfgReg[3], 0);
-	WRITE_DWORD(&cfgReg[1], 0);
-	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
-	IoAdapter->a.ram_out(&IoAdapter->a, &RAM->SWReg, SWREG_HALT_CPU);
-	i = 0;
-	while ((i < 100) && (IoAdapter->a.ram_in(&IoAdapter->a, &RAM->SWReg) != 0))
-	{
-		diva_os_wait(1);
-		i++;
-	}
-	DBG_TRC(("%s: PRI stopped (%d)", IoAdapter->Name, i))
-		cfgReg = (void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
-	WRITE_DWORD(&cfgReg[0], ((dword)(~0x03E00000)));
-	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
-	diva_os_wait(1);
-	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-	WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
-	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-}
-static int load_pri_hardware(PISDN_ADAPTER IoAdapter) {
-	return (0);
-}
-/* --------------------------------------------------------------------------
-   PRI Adapter interrupt Service Routine
-   -------------------------------------------------------------------------- */
-static int pri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
-	byte __iomem *cfg = DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
-	if (!(READ_DWORD(cfg) & 0x80000000)) {
-		DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
-		return (0);
-	}
-	/*
-	  clear interrupt line
-	*/
-	WRITE_DWORD(cfg, (dword)~0x03E00000);
-	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
-	IoAdapter->IrqCount++;
-	if (IoAdapter->Initialized)
-	{
-		diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
-	}
-	return (1);
-}
-/* -------------------------------------------------------------------------
-   Disable interrupt in the card hardware
-   ------------------------------------------------------------------------- */
-static void disable_pri_interrupt(PISDN_ADAPTER IoAdapter) {
-	dword volatile __iomem *cfgReg = (dword volatile __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
-	WRITE_DWORD(&cfgReg[3], 0);
-	WRITE_DWORD(&cfgReg[1], 0);
-	WRITE_DWORD(&cfgReg[0], (dword)(~0x03E00000));
-	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
-}
-/* -------------------------------------------------------------------------
-   Install entry points for PRI Adapter
-   ------------------------------------------------------------------------- */
-static void prepare_common_pri_functions(PISDN_ADAPTER IoAdapter) {
-	ADAPTER *a = &IoAdapter->a;
-	a->ram_in           = mem_in;
-	a->ram_inw          = mem_inw;
-	a->ram_in_buffer    = mem_in_buffer;
-	a->ram_look_ahead   = mem_look_ahead;
-	a->ram_out          = mem_out;
-	a->ram_outw         = mem_outw;
-	a->ram_out_buffer   = mem_out_buffer;
-	a->ram_inc          = mem_inc;
-	a->ram_offset       = pri_ram_offset;
-	a->ram_out_dw    = mem_out_dw;
-	a->ram_in_dw    = mem_in_dw;
-	a->istream_wakeup   = pr_stream;
-	IoAdapter->out      = pr_out;
-	IoAdapter->dpc      = pr_dpc;
-	IoAdapter->tst_irq  = scom_test_int;
-	IoAdapter->clr_irq  = scom_clear_int;
-	IoAdapter->pcm      = (struct pc_maint *)(MIPS_MAINT_OFFS
-						  - MP_SHARED_RAM_OFFSET);
-	IoAdapter->load     = load_pri_hardware;
-	IoAdapter->disIrq   = disable_pri_interrupt;
-	IoAdapter->rstFnc   = reset_pri_hardware;
-	IoAdapter->stop     = stop_pri_hardware;
-	IoAdapter->trapFnc  = pri_cpu_trapped;
-	IoAdapter->diva_isr_handler = pri_ISR;
-}
-/* -------------------------------------------------------------------------
-   Install entry points for PRI Adapter
-   ------------------------------------------------------------------------- */
-void prepare_pri_functions(PISDN_ADAPTER IoAdapter) {
-	IoAdapter->MemorySize = MP_MEMORY_SIZE;
-	prepare_common_pri_functions(IoAdapter);
-	diva_os_prepare_pri_functions(IoAdapter);
-}
-/* -------------------------------------------------------------------------
-   Install entry points for PRI Rev.2 Adapter
-   ------------------------------------------------------------------------- */
-void prepare_pri2_functions(PISDN_ADAPTER IoAdapter) {
-	IoAdapter->MemorySize = MP2_MEMORY_SIZE;
-	prepare_common_pri_functions(IoAdapter);
-	diva_os_prepare_pri2_functions(IoAdapter);
-}
-/* ------------------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/eicon/sdp_hdr.h b/drivers/isdn/hardware/eicon/sdp_hdr.h
deleted file mode 100644
index 5e20f8d..0000000
--- a/drivers/isdn/hardware/eicon/sdp_hdr.h
+++ /dev/null
@@ -1,117 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_SOFT_DSP_TASK_ENTRY_H__
-#define __DIVA_SOFT_DSP_TASK_ENTRY_H__
-/*
-  The soft DSP image is described by a binary header at the beginning of
-  this image:
-  OFFSET FROM IMAGE START |  VARIABLE
-  ------------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_LINK_OFFS   |  link to the next image
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_GP_OFFS    |  image gp register value, void*
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_ENTRY_OFFS   |  diva_mips_sdp_task_entry_t*
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_LOAD_ADDR_OFFS |  image start address (void*)
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_END_ADDR_OFFS |  image end address   (void*)
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_ID_STRING_OFFS |  image id string char[...];
-  ----------------------------------------------------------------------
-*/
-#define DIVA_MIPS_TASK_IMAGE_LINK_OFFS   0x6C
-#define DIVA_MIPS_TASK_IMAGE_GP_OFFS    0x70
-#define DIVA_MIPS_TASK_IMAGE_ENTRY_OFFS   0x74
-#define DIVA_MIPS_TASK_IMAGE_LOAD_ADDR_OFFS 0x78
-#define DIVA_MIPS_TASK_IMAGE_END_ADDR_OFFS 0x7c
-#define DIVA_MIPS_TASK_IMAGE_ID_STRING_OFFS 0x80
-/*
-  This function is called to set the GP register of this task.
-  It should always be called before any function of the task is
-  called.
-*/
-typedef void (*diva_task_set_prog_gp_proc_t)(void *new_gp);
-/*
-  This function is called to clear .bss at task initialization step
-*/
-typedef void (*diva_task_sys_reset_proc_t)(void);
-/*
-  This function is called to provide the master's GP to the task;
-  it will be used by calls from the task to the master.
-*/
-typedef void (*diva_task_set_main_gp_proc_t)(void *main_gp);
-/*
-  This function is called to provide address of 'dprintf' function
-  to the task
-*/
-typedef word (*diva_prt_proc_t)(char *, ...);
-typedef void (*diva_task_set_prt_proc_t)(diva_prt_proc_t fn);
-/*
-  This function is called to set task PID
-*/
-typedef void (*diva_task_set_pid_proc_t)(dword id);
-/*
-  This function is called for run-time task init
-*/
-typedef int (*diva_task_run_time_init_proc_t)(void*, dword);
-/*
-  This function is called from system scheduler or from timer
-*/
-typedef void (*diva_task_callback_proc_t)(void);
-/*
-  This callback is used by the task to get the current time in ms
-*/
-typedef dword (*diva_task_get_tick_count_proc_t)(void);
-typedef void (*diva_task_set_get_time_proc_t)(\
-	diva_task_get_tick_count_proc_t fn);
-typedef struct _diva_mips_sdp_task_entry {
-	diva_task_set_prog_gp_proc_t  set_gp_proc;
-	diva_task_sys_reset_proc_t   sys_reset_proc;
-	diva_task_set_main_gp_proc_t  set_main_gp_proc;
-	diva_task_set_prt_proc_t    set_dprintf_proc;
-	diva_task_set_pid_proc_t    set_pid_proc;
-	diva_task_run_time_init_proc_t run_time_init_proc;
-	diva_task_callback_proc_t    task_callback_proc;
-	diva_task_callback_proc_t    timer_callback_proc;
-	diva_task_set_get_time_proc_t  set_get_time_proc;
-	void *last_entry_proc;
-} diva_mips_sdp_task_entry_t;
-/*
-  'last_entry_proc' should be set to zero and is used for future extensions
-*/
-typedef struct _diva_mips_sw_task {
-	diva_mips_sdp_task_entry_t  sdp_entry;
-	void *sdp_gp_reg;
-	void *own_gp_reg;
-} diva_mips_sw_task_t;
-#if !defined(DIVA_BRI2F_SDP_1_NAME)
-#define DIVA_BRI2F_SDP_1_NAME "sdp0.2q0"
-#endif
-#if !defined(DIVA_BRI2F_SDP_2_NAME)
-#define DIVA_BRI2F_SDP_2_NAME "sdp1.2q0"
-#endif
-#endif
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
deleted file mode 100644
index db4dd4f..0000000
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ /dev/null
@@ -1,886 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: um_idi.c,v 1.14 2004/03/21 17:54:37 armin Exp $ */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "dqueue.h"
-#include "adapter.h"
-#include "entity.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-#include "debuglib.h"
-#include "divasync.h"
-
-#define DIVAS_MAX_XDI_ADAPTERS	64
-
-/* --------------------------------------------------------------------------
-   IMPORTS
-   -------------------------------------------------------------------------- */
-extern void diva_os_wakeup_read(void *os_context);
-extern void diva_os_wakeup_close(void *os_context);
-/* --------------------------------------------------------------------------
-   LOCALS
-   -------------------------------------------------------------------------- */
-static LIST_HEAD(adapter_q);
-static diva_os_spin_lock_t adapter_lock;
-
-static diva_um_idi_adapter_t *diva_um_idi_find_adapter(dword nr);
-static void cleanup_adapter(diva_um_idi_adapter_t *a);
-static void cleanup_entity(divas_um_idi_entity_t *e);
-static int diva_user_mode_idi_adapter_features(diva_um_idi_adapter_t *a,
-					       diva_um_idi_adapter_features_t
-					       *features);
-static int process_idi_request(divas_um_idi_entity_t *e,
-			       const diva_um_idi_req_hdr_t *req);
-static int process_idi_rc(divas_um_idi_entity_t *e, byte rc);
-static int process_idi_ind(divas_um_idi_entity_t *e, byte ind);
-static int write_return_code(divas_um_idi_entity_t *e, byte rc);
-
-/* --------------------------------------------------------------------------
-   MAIN
-   -------------------------------------------------------------------------- */
-int diva_user_mode_idi_init(void)
-{
-	diva_os_initialize_spin_lock(&adapter_lock, "adapter");
-	return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Copy adapter features to user supplied buffer
-   -------------------------------------------------------------------------- */
-static int
-diva_user_mode_idi_adapter_features(diva_um_idi_adapter_t *a,
-				    diva_um_idi_adapter_features_t *
-				    features)
-{
-	IDI_SYNC_REQ sync_req;
-
-	if ((a) && (a->d.request)) {
-		features->type = a->d.type;
-		features->features = a->d.features;
-		features->channels = a->d.channels;
-		memset(features->name, 0, sizeof(features->name));
-
-		sync_req.GetName.Req = 0;
-		sync_req.GetName.Rc = IDI_SYNC_REQ_GET_NAME;
-		(*(a->d.request)) ((ENTITY *)&sync_req);
-		strlcpy(features->name, sync_req.GetName.name,
-			sizeof(features->name));
-
-		sync_req.GetSerial.Req = 0;
-		sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
-		sync_req.GetSerial.serial = 0;
-		(*(a->d.request))((ENTITY *)&sync_req);
-		features->serial_number = sync_req.GetSerial.serial;
-	}
-
-	return ((a) ? 0 : -1);
-}
-
-/* --------------------------------------------------------------------------
-   REMOVE ADAPTER
-   -------------------------------------------------------------------------- */
-void diva_user_mode_idi_remove_adapter(int adapter_nr)
-{
-	struct list_head *tmp;
-	diva_um_idi_adapter_t *a;
-
-	list_for_each(tmp, &adapter_q) {
-		a = list_entry(tmp, diva_um_idi_adapter_t, link);
-		if (a->adapter_nr == adapter_nr) {
-			list_del(tmp);
-			cleanup_adapter(a);
-			DBG_LOG(("DIDD: del adapter(%d)", a->adapter_nr));
-			diva_os_free(0, a);
-			break;
-		}
-	}
-}
-
-/* --------------------------------------------------------------------------
-   CALLED ON DRIVER EXIT (UNLOAD)
-   -------------------------------------------------------------------------- */
-void diva_user_mode_idi_finit(void)
-{
-	struct list_head *tmp, *safe;
-	diva_um_idi_adapter_t *a;
-
-	list_for_each_safe(tmp, safe, &adapter_q) {
-		a = list_entry(tmp, diva_um_idi_adapter_t, link);
-		list_del(tmp);
-		cleanup_adapter(a);
-		DBG_LOG(("DIDD: del adapter(%d)", a->adapter_nr));
-		diva_os_free(0, a);
-	}
-	diva_os_destroy_spin_lock(&adapter_lock, "adapter");
-}
-
-/* -------------------------------------------------------------------------
-   CREATE AND INIT IDI ADAPTER
-   ------------------------------------------------------------------------- */
-int diva_user_mode_idi_create_adapter(const DESCRIPTOR *d, int adapter_nr)
-{
-	diva_os_spin_lock_magic_t old_irql;
-	diva_um_idi_adapter_t *a =
-		(diva_um_idi_adapter_t *) diva_os_malloc(0,
-							 sizeof
-							 (diva_um_idi_adapter_t));
-
-	if (!a) {
-		return (-1);
-	}
-	memset(a, 0x00, sizeof(*a));
-	INIT_LIST_HEAD(&a->entity_q);
-
-	a->d = *d;
-	a->adapter_nr = adapter_nr;
-
-	DBG_LOG(("DIDD_ADD A(%d), type:%02x, features:%04x, channels:%d",
-		 adapter_nr, a->d.type, a->d.features, a->d.channels));
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "create_adapter");
-	list_add_tail(&a->link, &adapter_q);
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_adapter");
-	return (0);
-}
-
-/* ------------------------------------------------------------------------
-   Find adapter by Adapter number
-   ------------------------------------------------------------------------ */
-static diva_um_idi_adapter_t *diva_um_idi_find_adapter(dword nr)
-{
-	diva_um_idi_adapter_t *a = NULL;
-	struct list_head *tmp;
-
-	list_for_each(tmp, &adapter_q) {
-		a = list_entry(tmp, diva_um_idi_adapter_t, link);
-		DBG_TRC(("find_adapter: (%d)-(%d)", nr, a->adapter_nr));
-		if (a->adapter_nr == (int)nr)
-			break;
-		a = NULL;
-	}
-	return (a);
-}
-
-/* ------------------------------------------------------------------------
-   Cleanup this adapter and cleanup/delete all entities assigned
-   to this adapter
-   ------------------------------------------------------------------------ */
-static void cleanup_adapter(diva_um_idi_adapter_t *a)
-{
-	struct list_head *tmp, *safe;
-	divas_um_idi_entity_t *e;
-
-	list_for_each_safe(tmp, safe, &a->entity_q) {
-		e = list_entry(tmp, divas_um_idi_entity_t, link);
-		list_del(tmp);
-		cleanup_entity(e);
-		if (e->os_context) {
-			diva_os_wakeup_read(e->os_context);
-			diva_os_wakeup_close(e->os_context);
-		}
-	}
-	memset(&a->d, 0x00, sizeof(DESCRIPTOR));
-}
-
-/* ------------------------------------------------------------------------
-   Cleanup, but NOT delete this entity
-   ------------------------------------------------------------------------ */
-static void cleanup_entity(divas_um_idi_entity_t *e)
-{
-	e->os_ref = NULL;
-	e->status = 0;
-	e->adapter = NULL;
-	e->e.Id = 0;
-	e->rc_count = 0;
-
-	e->status |= DIVA_UM_IDI_REMOVED;
-	e->status |= DIVA_UM_IDI_REMOVE_PENDING;
-
-	diva_data_q_finit(&e->data);
-	diva_data_q_finit(&e->rc);
-}
-
-
-/* ------------------------------------------------------------------------
-   Create ENTITY, link it to the adapter and return a pointer to the entity
-   ------------------------------------------------------------------------ */
-void *divas_um_idi_create_entity(dword adapter_nr, void *file)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	diva_os_spin_lock_magic_t old_irql;
-
-	if ((e = (divas_um_idi_entity_t *) diva_os_malloc(0, sizeof(*e)))) {
-		memset(e, 0x00, sizeof(*e));
-		if (!
-		    (e->os_context =
-		     diva_os_malloc(0, diva_os_get_context_size()))) {
-			DBG_LOG(("E(%08x) no memory for os context", e));
-			diva_os_free(0, e);
-			return NULL;
-		}
-		memset(e->os_context, 0x00, diva_os_get_context_size());
-
-		if ((diva_data_q_init(&e->data, 2048 + 512, 16))) {
-			diva_os_free(0, e->os_context);
-			diva_os_free(0, e);
-			return NULL;
-		}
-		if ((diva_data_q_init(&e->rc, sizeof(diva_um_idi_ind_hdr_t), 2))) {
-			diva_data_q_finit(&e->data);
-			diva_os_free(0, e->os_context);
-			diva_os_free(0, e);
-			return NULL;
-		}
-
-		diva_os_enter_spin_lock(&adapter_lock, &old_irql, "create_entity");
-		/*
-		  Look for Adapter requested
-		*/
-		if (!(a = diva_um_idi_find_adapter(adapter_nr))) {
-			/*
-			  No adapter was found, or this adapter was removed
-			*/
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_entity");
-
-			DBG_LOG(("A: no adapter(%ld)", adapter_nr));
-
-			cleanup_entity(e);
-			diva_os_free(0, e->os_context);
-			diva_os_free(0, e);
-
-			return NULL;
-		}
-
-		e->os_ref = file;	/* link to os handle */
-		e->adapter = a;	/* link to adapter   */
-
-		list_add_tail(&e->link, &a->entity_q);	/* link from adapter */
-
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_entity");
-
-		DBG_LOG(("A(%ld), create E(%08x)", adapter_nr, e));
-	}
-
-	return (e);
-}
-
-/* ------------------------------------------------------------------------
-   Unlink entity and free memory
-   ------------------------------------------------------------------------ */
-int divas_um_idi_delete_entity(int adapter_nr, void *entity)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	diva_os_spin_lock_magic_t old_irql;
-
-	if (!(e = (divas_um_idi_entity_t *) entity))
-		return (-1);
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "delete_entity");
-	if ((a = e->adapter)) {
-		list_del(&e->link);
-	}
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "delete_entity");
-
-	diva_um_idi_stop_wdog(entity);
-	cleanup_entity(e);
-	diva_os_free(0, e->os_context);
-	memset(e, 0x00, sizeof(*e));
-
-	DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
-	diva_os_free(0, e);
-
-	return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Called by application to read data from IDI
-   -------------------------------------------------------------------------- */
-int diva_um_idi_read(void *entity,
-		     void *os_handle,
-		     void *dst,
-		     int max_length, divas_um_idi_copy_to_user_fn_t cp_fn)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	const void *data;
-	int length, ret = 0;
-	diva_um_idi_data_queue_t *q;
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "read");
-
-	e = (divas_um_idi_entity_t *) entity;
-	if (!e || (!(a = e->adapter)) ||
-	    (e->status & DIVA_UM_IDI_REMOVE_PENDING) ||
-	    (e->status & DIVA_UM_IDI_REMOVED) ||
-	    (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "read");
-		DBG_ERR(("E(%08x) read failed - adapter removed", e))
-			return (-1);
-	}
-
-	DBG_TRC(("A(%d) E(%08x) read(%d)", a->adapter_nr, e, max_length));
-
-	/*
-	  Try to read return code first
-	*/
-	data = diva_data_q_get_segment4read(&e->rc);
-	q = &e->rc;
-
-	/*
-	  No return codes available, read indications now
-	*/
-	if (!data) {
-		if (!(e->status & DIVA_UM_IDI_RC_PENDING)) {
-			DBG_TRC(("A(%d) E(%08x) read data", a->adapter_nr, e));
-			data = diva_data_q_get_segment4read(&e->data);
-			q = &e->data;
-		}
-	} else {
-		e->status &= ~DIVA_UM_IDI_RC_PENDING;
-		DBG_TRC(("A(%d) E(%08x) read rc", a->adapter_nr, e));
-	}
-
-	if (data) {
-		if ((length = diva_data_q_get_segment_length(q)) >
-		    max_length) {
-			/*
-			  Not enough space to read message
-			*/
-			DBG_ERR(("A: A(%d) E(%08x) read small buffer",
-				 a->adapter_nr, e, ret));
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql,
-						"read");
-			return (-2);
-		}
-		/*
-		  Copy it to user, this function accesses ONLY locked and verified
-		  memory, so we can access it with the spin lock held
-		*/
-
-		if ((ret = (*cp_fn) (os_handle, dst, data, length)) >= 0) {
-			/*
-			  Acknowledge only if read was successful
-			*/
-			diva_data_q_ack_segment4read(q);
-		}
-	}
-
-
-	DBG_TRC(("A(%d) E(%08x) read=%d", a->adapter_nr, e, ret));
-
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "read");
-
-	return (ret);
-}
-
-
-int diva_um_idi_write(void *entity,
-		      void *os_handle,
-		      const void *src,
-		      int length, divas_um_idi_copy_from_user_fn_t cp_fn)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	diva_um_idi_req_hdr_t *req;
-	void *data;
-	int ret = 0;
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "write");
-
-	e = (divas_um_idi_entity_t *) entity;
-	if (!e || (!(a = e->adapter)) ||
-	    (e->status & DIVA_UM_IDI_REMOVE_PENDING) ||
-	    (e->status & DIVA_UM_IDI_REMOVED) ||
-	    (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-		DBG_ERR(("E(%08x) write failed - adapter removed", e))
-			return (-1);
-	}
-
-	DBG_TRC(("A(%d) E(%08x) write(%d)", a->adapter_nr, e, length));
-
-	if ((length < sizeof(*req)) || (length > sizeof(e->buffer))) {
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-		return (-2);
-	}
-
-	if (e->status & DIVA_UM_IDI_RC_PENDING) {
-		DBG_ERR(("A: A(%d) E(%08x) rc pending", a->adapter_nr, e));
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-		return (-1);	/* should wait for RC code first */
-	}
-
-	/*
-	  Copy function does access only locked verified memory,
-	  also it can be called with spin lock held
-	*/
-	if ((ret = (*cp_fn) (os_handle, e->buffer, src, length)) < 0) {
-		DBG_TRC(("A: A(%d) E(%08x) write error=%d", a->adapter_nr,
-			 e, ret));
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-		return (ret);
-	}
-
-	req = (diva_um_idi_req_hdr_t *)&e->buffer[0];
-
-	switch (req->type) {
-	case DIVA_UM_IDI_GET_FEATURES:{
-		DBG_LOG(("A(%d) get_features", a->adapter_nr));
-		if (!(data =
-		      diva_data_q_get_segment4write(&e->data))) {
-			DBG_ERR(("A(%d) get_features, no free buffer",
-				 a->adapter_nr));
-			diva_os_leave_spin_lock(&adapter_lock,
-						&old_irql,
-						"write");
-			return (0);
-		}
-		diva_user_mode_idi_adapter_features(a, &(((diva_um_idi_ind_hdr_t
-							   *) data)->hdr.features));
-		((diva_um_idi_ind_hdr_t *) data)->type =
-			DIVA_UM_IDI_IND_FEATURES;
-		((diva_um_idi_ind_hdr_t *) data)->data_length = 0;
-		diva_data_q_ack_segment4write(&e->data,
-					      sizeof(diva_um_idi_ind_hdr_t));
-
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-
-		diva_os_wakeup_read(e->os_context);
-	}
-		break;
-
-	case DIVA_UM_IDI_REQ:
-	case DIVA_UM_IDI_REQ_MAN:
-	case DIVA_UM_IDI_REQ_SIG:
-	case DIVA_UM_IDI_REQ_NET:
-		DBG_TRC(("A(%d) REQ(%02d)-(%02d)-(%08x)", a->adapter_nr,
-			 req->Req, req->ReqCh,
-			 req->type & DIVA_UM_IDI_REQ_TYPE_MASK));
-		switch (process_idi_request(e, req)) {
-		case -1:
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-			return (-1);
-		case -2:
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-			diva_os_wakeup_read(e->os_context);
-			break;
-		default:
-			diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-			break;
-		}
-		break;
-
-	default:
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-		return (-1);
-	}
-
-	DBG_TRC(("A(%d) E(%08x) write=%d", a->adapter_nr, e, ret));
-
-	return (ret);
-}
-
-/* --------------------------------------------------------------------------
-   CALLBACK FROM XDI
-   -------------------------------------------------------------------------- */
-static void diva_um_idi_xdi_callback(ENTITY *entity)
-{
-	divas_um_idi_entity_t *e = DIVAS_CONTAINING_RECORD(entity,
-							   divas_um_idi_entity_t,
-							   e);
-	diva_os_spin_lock_magic_t old_irql;
-	int call_wakeup = 0;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
-
-	if (e->e.complete == 255) {
-		if (!(e->status & DIVA_UM_IDI_REMOVE_PENDING)) {
-			diva_um_idi_stop_wdog(e);
-		}
-		if ((call_wakeup = process_idi_rc(e, e->e.Rc))) {
-			if (e->rc_count) {
-				e->rc_count--;
-			}
-		}
-		e->e.Rc = 0;
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
-
-		if (call_wakeup) {
-			diva_os_wakeup_read(e->os_context);
-			diva_os_wakeup_close(e->os_context);
-		}
-	} else {
-		if (e->status & DIVA_UM_IDI_REMOVE_PENDING) {
-			e->e.RNum = 0;
-			e->e.RNR = 2;
-		} else {
-			call_wakeup = process_idi_ind(e, e->e.Ind);
-		}
-		e->e.Ind = 0;
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
-		if (call_wakeup) {
-			diva_os_wakeup_read(e->os_context);
-		}
-	}
-}
-
-static int process_idi_request(divas_um_idi_entity_t *e,
-			       const diva_um_idi_req_hdr_t *req)
-{
-	int assign = 0;
-	byte Req = (byte) req->Req;
-	dword type = req->type & DIVA_UM_IDI_REQ_TYPE_MASK;
-
-	if (!e->e.Id || !e->e.callback) {	/* not assigned */
-		if (Req != ASSIGN) {
-			DBG_ERR(("A: A(%d) E(%08x) not assigned",
-				 e->adapter->adapter_nr, e));
-			return (-1);	/* NOT ASSIGNED */
-		} else {
-			switch (type) {
-			case DIVA_UM_IDI_REQ_TYPE_MAN:
-				e->e.Id = MAN_ID;
-				DBG_TRC(("A(%d) E(%08x) assign MAN",
-					 e->adapter->adapter_nr, e));
-				break;
-
-			case DIVA_UM_IDI_REQ_TYPE_SIG:
-				e->e.Id = DSIG_ID;
-				DBG_TRC(("A(%d) E(%08x) assign SIG",
-					 e->adapter->adapter_nr, e));
-				break;
-
-			case DIVA_UM_IDI_REQ_TYPE_NET:
-				e->e.Id = NL_ID;
-				DBG_TRC(("A(%d) E(%08x) assign NET",
-					 e->adapter->adapter_nr, e));
-				break;
-
-			default:
-				DBG_ERR(("A: A(%d) E(%08x) unknown type=%08x",
-					 e->adapter->adapter_nr, e,
-					 type));
-				return (-1);
-			}
-		}
-		e->e.XNum = 1;
-		e->e.RNum = 1;
-		e->e.callback = diva_um_idi_xdi_callback;
-		e->e.X = &e->XData;
-		e->e.R = &e->RData;
-		assign = 1;
-	}
-	e->status |= DIVA_UM_IDI_RC_PENDING;
-	e->e.Req = Req;
-	e->e.ReqCh = (byte) req->ReqCh;
-	e->e.X->PLength = (word) req->data_length;
-	e->e.X->P = (byte *)&req[1];	/* Our buffer is safe */
-
-	DBG_TRC(("A(%d) E(%08x) request(%02x-%02x-%02x (%d))",
-		 e->adapter->adapter_nr, e, e->e.Id, e->e.Req,
-		 e->e.ReqCh, e->e.X->PLength));
-
-	e->rc_count++;
-
-	if (e->adapter && e->adapter->d.request) {
-		diva_um_idi_start_wdog(e);
-		(*(e->adapter->d.request)) (&e->e);
-	}
-
-	if (assign) {
-		if (e->e.Rc == OUT_OF_RESOURCES) {
-			/*
-			  XDI has no more entities, the call was not forwarded to the card,
-			  no callback will be scheduled
-			*/
-			DBG_ERR(("A: A(%d) E(%08x) XDI out of entities",
-				 e->adapter->adapter_nr, e));
-
-			e->e.Id = 0;
-			e->e.ReqCh = 0;
-			e->e.RcCh = 0;
-			e->e.Ind = 0;
-			e->e.IndCh = 0;
-			e->e.XNum = 0;
-			e->e.RNum = 0;
-			e->e.callback = NULL;
-			e->e.X = NULL;
-			e->e.R = NULL;
-			write_return_code(e, ASSIGN_RC | OUT_OF_RESOURCES);
-			return (-2);
-		} else {
-			e->status |= DIVA_UM_IDI_ASSIGN_PENDING;
-		}
-	}
-
-	return (0);
-}
-
-static int process_idi_rc(divas_um_idi_entity_t *e, byte rc)
-{
-	DBG_TRC(("A(%d) E(%08x) rc(%02x-%02x-%02x)",
-		 e->adapter->adapter_nr, e, e->e.Id, rc, e->e.RcCh));
-
-	if (e->status & DIVA_UM_IDI_ASSIGN_PENDING) {
-		e->status &= ~DIVA_UM_IDI_ASSIGN_PENDING;
-		if (rc != ASSIGN_OK) {
-			DBG_ERR(("A: A(%d) E(%08x) ASSIGN failed",
-				 e->adapter->adapter_nr, e));
-			e->e.callback = NULL;
-			e->e.Id = 0;
-			e->e.Req = 0;
-			e->e.ReqCh = 0;
-			e->e.Rc = 0;
-			e->e.RcCh = 0;
-			e->e.Ind = 0;
-			e->e.IndCh = 0;
-			e->e.X = NULL;
-			e->e.R = NULL;
-			e->e.XNum = 0;
-			e->e.RNum = 0;
-		}
-	}
-	if ((e->e.Req == REMOVE) && e->e.Id && (rc == 0xff)) {
-		DBG_ERR(("A: A(%d) E(%08x)  discard OK in REMOVE",
-			 e->adapter->adapter_nr, e));
-		return (0);	/* let us do it in the driver */
-	}
-	if ((e->e.Req == REMOVE) && (!e->e.Id)) {	/* REMOVE COMPLETE */
-		e->e.callback = NULL;
-		e->e.Id = 0;
-		e->e.Req = 0;
-		e->e.ReqCh = 0;
-		e->e.Rc = 0;
-		e->e.RcCh = 0;
-		e->e.Ind = 0;
-		e->e.IndCh = 0;
-		e->e.X = NULL;
-		e->e.R = NULL;
-		e->e.XNum = 0;
-		e->e.RNum = 0;
-		e->rc_count = 0;
-	}
-	if ((e->e.Req == REMOVE) && (rc != 0xff)) {	/* REMOVE FAILED */
-		DBG_ERR(("A: A(%d) E(%08x)  REMOVE FAILED",
-			 e->adapter->adapter_nr, e));
-	}
-	write_return_code(e, rc);
-
-	return (1);
-}
-
-static int process_idi_ind(divas_um_idi_entity_t *e, byte ind)
-{
-	int do_wakeup = 0;
-
-	if (e->e.complete != 0x02) {
-		diva_um_idi_ind_hdr_t *pind =
-			(diva_um_idi_ind_hdr_t *)
-			diva_data_q_get_segment4write(&e->data);
-		if (pind) {
-			e->e.RNum = 1;
-			e->e.R->P = (byte *)&pind[1];
-			e->e.R->PLength =
-				(word) (diva_data_q_get_max_length(&e->data) -
-					sizeof(*pind));
-			DBG_TRC(("A(%d) E(%08x) ind_1(%02x-%02x-%02x)-[%d-%d]",
-				 e->adapter->adapter_nr, e, e->e.Id, ind,
-				 e->e.IndCh, e->e.RLength,
-				 e->e.R->PLength));
-
-		} else {
-			DBG_TRC(("A(%d) E(%08x) ind(%02x-%02x-%02x)-RNR",
-				 e->adapter->adapter_nr, e, e->e.Id, ind,
-				 e->e.IndCh));
-			e->e.RNum = 0;
-			e->e.RNR = 1;
-			do_wakeup = 1;
-		}
-	} else {
-		diva_um_idi_ind_hdr_t *pind =
-			(diva_um_idi_ind_hdr_t *) (e->e.R->P);
-
-		DBG_TRC(("A(%d) E(%08x) ind(%02x-%02x-%02x)-[%d]",
-			 e->adapter->adapter_nr, e, e->e.Id, ind,
-			 e->e.IndCh, e->e.R->PLength));
-
-		pind--;
-		pind->type = DIVA_UM_IDI_IND;
-		pind->hdr.ind.Ind = ind;
-		pind->hdr.ind.IndCh = e->e.IndCh;
-		pind->data_length = e->e.R->PLength;
-		diva_data_q_ack_segment4write(&e->data,
-					      (int) (sizeof(*pind) +
-						     e->e.R->PLength));
-		do_wakeup = 1;
-	}
-
-	if ((e->status & DIVA_UM_IDI_RC_PENDING) && !e->rc.count) {
-		do_wakeup = 0;
-	}
-
-	return (do_wakeup);
-}
-
-/* --------------------------------------------------------------------------
-   Write return code to the return code queue of entity
-   -------------------------------------------------------------------------- */
-static int write_return_code(divas_um_idi_entity_t *e, byte rc)
-{
-	diva_um_idi_ind_hdr_t *prc;
-
-	if (!(prc =
-	      (diva_um_idi_ind_hdr_t *) diva_data_q_get_segment4write(&e->rc)))
-	{
-		DBG_ERR(("A: A(%d) E(%08x) rc(%02x) lost",
-			 e->adapter->adapter_nr, e, rc));
-		e->status &= ~DIVA_UM_IDI_RC_PENDING;
-		return (-1);
-	}
-
-	prc->type = DIVA_UM_IDI_IND_RC;
-	prc->hdr.rc.Rc = rc;
-	prc->hdr.rc.RcCh = e->e.RcCh;
-	prc->data_length = 0;
-	diva_data_q_ack_segment4write(&e->rc, sizeof(*prc));
-
-	return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Return the number of entries that can be read from this entity, or
-   -1 if adapter was removed
-   -------------------------------------------------------------------------- */
-int diva_user_mode_idi_ind_ready(void *entity, void *os_handle)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	diva_os_spin_lock_magic_t old_irql;
-	int ret;
-
-	if (!entity)
-		return (-1);
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "ind_ready");
-	e = (divas_um_idi_entity_t *) entity;
-	a = e->adapter;
-
-	if ((!a) || (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
-		/*
-		  Adapter was unloaded
-		*/
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
-		return (-1);	/* adapter was removed */
-	}
-	if (e->status & DIVA_UM_IDI_REMOVED) {
-		/*
-		  entity was removed as result of adapter removal
-		  user should assign this entity again
-		*/
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
-		return (-1);
-	}
-
-	ret = e->rc.count + e->data.count;
-
-	if ((e->status & DIVA_UM_IDI_RC_PENDING) && !e->rc.count) {
-		ret = 0;
-	}
-
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
-
-	return (ret);
-}
-
-void *diva_um_id_get_os_context(void *entity)
-{
-	return (((divas_um_idi_entity_t *) entity)->os_context);
-}
-
-int divas_um_idi_entity_assigned(void *entity)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	int ret;
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "assigned?");
-
-
-	e = (divas_um_idi_entity_t *) entity;
-	if (!e || (!(a = e->adapter)) ||
-	    (e->status & DIVA_UM_IDI_REMOVED) ||
-	    (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "assigned?");
-		return (0);
-	}
-
-	e->status |= DIVA_UM_IDI_REMOVE_PENDING;
-
-	ret = (e->e.Id || e->rc_count
-	       || (e->status & DIVA_UM_IDI_ASSIGN_PENDING));
-
-	DBG_TRC(("Id:%02x, rc_count:%d, status:%08x", e->e.Id, e->rc_count,
-		 e->status))
-
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "assigned?");
-
-	return (ret);
-}
-
-int divas_um_idi_entity_start_remove(void *entity)
-{
-	divas_um_idi_entity_t *e;
-	diva_um_idi_adapter_t *a;
-	diva_os_spin_lock_magic_t old_irql;
-
-	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "start_remove");
-
-	e = (divas_um_idi_entity_t *) entity;
-	if (!e || (!(a = e->adapter)) ||
-	    (e->status & DIVA_UM_IDI_REMOVED) ||
-	    (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
-		return (0);
-	}
-
-	if (e->rc_count) {
-		/*
-		  Entity BUSY
-		*/
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
-		return (1);
-	}
-
-	if (!e->e.Id) {
-		/*
-		  Remove request was already pending, and arrived now
-		*/
-		diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
-		return (0);	/* REMOVE was pending */
-	}
-
-	/*
-	  Now send remove request
-	*/
-	e->e.Req = REMOVE;
-	e->e.ReqCh = 0;
-
-	e->rc_count++;
-
-	DBG_TRC(("A(%d) E(%08x) request(%02x-%02x-%02x (%d))",
-		 e->adapter->adapter_nr, e, e->e.Id, e->e.Req,
-		 e->e.ReqCh, e->e.X->PLength));
-
-	if (a->d.request)
-		(*(a->d.request)) (&e->e);
-
-	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
-
-	return (0);
-}
diff --git a/drivers/isdn/hardware/eicon/um_idi.h b/drivers/isdn/hardware/eicon/um_idi.h
deleted file mode 100644
index 9aedd9e..0000000
--- a/drivers/isdn/hardware/eicon/um_idi.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: um_idi.h,v 1.6 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_IDI_CORE_H__
-#define __DIVA_USER_MODE_IDI_CORE_H__
-
-
-/*
-  interface between UM IDI core and OS dependent part
-*/
-int diva_user_mode_idi_init(void);
-void diva_user_mode_idi_finit(void);
-void *divas_um_idi_create_entity(dword adapter_nr, void *file);
-int divas_um_idi_delete_entity(int adapter_nr, void *entity);
-
-typedef int (*divas_um_idi_copy_to_user_fn_t) (void *os_handle,
-					       void *dst,
-					       const void *src,
-					       int length);
-typedef int (*divas_um_idi_copy_from_user_fn_t) (void *os_handle,
-						 void *dst,
-						 const void *src,
-						 int length);
-
-int diva_um_idi_read(void *entity,
-		     void *os_handle,
-		     void *dst,
-		     int max_length, divas_um_idi_copy_to_user_fn_t cp_fn);
-
-int diva_um_idi_write(void *entity,
-		      void *os_handle,
-		      const void *src,
-		      int length, divas_um_idi_copy_from_user_fn_t cp_fn);
-
-int diva_user_mode_idi_ind_ready(void *entity, void *os_handle);
-void *diva_um_id_get_os_context(void *entity);
-int diva_os_get_context_size(void);
-int divas_um_idi_entity_assigned(void *entity);
-int divas_um_idi_entity_start_remove(void *entity);
-
-void diva_um_idi_start_wdog(void *entity);
-void diva_um_idi_stop_wdog(void *entity);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/um_xdi.h b/drivers/isdn/hardware/eicon/um_xdi.h
deleted file mode 100644
index 1f37aa4..0000000
--- a/drivers/isdn/hardware/eicon/um_xdi.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: um_xdi.h,v 1.1.2.2 2002/10/02 14:38:38 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_XDI_H__
-#define __DIVA_USER_MODE_XDI_H__
-
-/*
-  Contains declarations of structures shared between the application
-  and user mode idi driver
-*/
-
-typedef struct _diva_um_idi_adapter_features {
-	dword type;
-	dword features;
-	dword channels;
-	dword serial_number;
-	char name[128];
-} diva_um_idi_adapter_features_t;
-
-#define DIVA_UM_IDI_REQ_MASK			0x0000FFFF
-#define DIVA_UM_IDI_REQ_TYPE_MASK		(~(DIVA_UM_IDI_REQ_MASK))
-#define DIVA_UM_IDI_GET_FEATURES		1	/* trigger features indication */
-#define DIVA_UM_IDI_REQ				2
-#define DIVA_UM_IDI_REQ_TYPE_MAN		0x10000000
-#define DIVA_UM_IDI_REQ_TYPE_SIG		0x20000000
-#define DIVA_UM_IDI_REQ_TYPE_NET		0x30000000
-#define DIVA_UM_IDI_REQ_MAN			(DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_MAN)
-#define DIVA_UM_IDI_REQ_SIG			(DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_SIG)
-#define DIVA_UM_IDI_REQ_NET			(DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_NET)
-/*
-  data_length  bytes will follow this structure
-*/
-typedef struct _diva_um_idi_req_hdr {
-	dword type;
-	dword Req;
-	dword ReqCh;
-	dword data_length;
-} diva_um_idi_req_hdr_t;
-
-typedef struct _diva_um_idi_ind_parameters {
-	dword Ind;
-	dword IndCh;
-} diva_um_idi_ind_parameters_t;
-
-typedef struct _diva_um_idi_rc_parameters {
-	dword Rc;
-	dword RcCh;
-} diva_um_idi_rc_parameters_t;
-
-typedef union _diva_um_idi_ind {
-	diva_um_idi_adapter_features_t features;
-	diva_um_idi_ind_parameters_t ind;
-	diva_um_idi_rc_parameters_t rc;
-} diva_um_idi_ind_t;
-
-#define DIVA_UM_IDI_IND_FEATURES  1	/* features indication */
-#define DIVA_UM_IDI_IND           2
-#define DIVA_UM_IDI_IND_RC        3
-/*
-  data_length bytes of data follow
-  this structure
-*/
-typedef struct _diva_um_idi_ind_hdr {
-	dword type;
-	diva_um_idi_ind_t hdr;
-	dword data_length;
-} diva_um_idi_ind_hdr_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
deleted file mode 100644
index b036e21..0000000
--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: xdi_adapter.h,v 1.7 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_OS_XDI_ADAPTER_H__
-#define __DIVA_OS_XDI_ADAPTER_H__
-
-#define DIVAS_XDI_ADAPTER_BUS_PCI  0
-#define DIVAS_XDI_ADAPTER_BUS_ISA  1
-
-typedef struct _divas_pci_card_resources {
-	byte bus;
-	byte func;
-	void *hdev;
-
-	dword bar[8];		/* contains context of appropriate BAR Register */
-	void __iomem *addr[8];		/* same bar, but mapped into memory */
-	dword length[8];	/* bar length */
-	int mem_type_id[MAX_MEM_TYPE];
-	unsigned int qoffset;
-	byte irq;
-} divas_pci_card_resources_t;
-
-typedef union _divas_card_resources {
-	divas_pci_card_resources_t pci;
-} divas_card_resources_t;
-
-struct _diva_os_xdi_adapter;
-typedef int (*diva_init_card_proc_t)(struct _diva_os_xdi_adapter *a);
-typedef int (*diva_cmd_card_proc_t)(struct _diva_os_xdi_adapter *a,
-				    diva_xdi_um_cfg_cmd_t *data,
-				    int length);
-typedef void (*diva_xdi_clear_interrupts_proc_t)(struct
-						 _diva_os_xdi_adapter *);
-
-#define DIVA_XDI_MBOX_BUSY			1
-#define DIVA_XDI_MBOX_WAIT_XLOG	2
-
-typedef struct _xdi_mbox_t {
-	dword status;
-	diva_xdi_um_cfg_cmd_data_t cmd_data;
-	dword data_length;
-	void *data;
-} xdi_mbox_t;
-
-typedef struct _diva_os_idi_adapter_interface {
-	diva_init_card_proc_t cleanup_adapter_proc;
-	diva_cmd_card_proc_t cmd_proc;
-} diva_os_idi_adapter_interface_t;
-
-typedef struct _diva_os_xdi_adapter {
-	struct list_head link;
-	int CardIndex;
-	int CardOrdinal;
-	int controller;		/* number of this controller */
-	int Bus;		/* PCI, ISA, ... */
-	divas_card_resources_t resources;
-	char port_name[24];
-	ISDN_ADAPTER xdi_adapter;
-	xdi_mbox_t xdi_mbox;
-	diva_os_idi_adapter_interface_t interface;
-	struct _diva_os_xdi_adapter *slave_adapters[3];
-	void *slave_list;
-	void *proc_adapter_dir;	/* adapterX proc entry */
-	void *proc_info;	/* info proc entry     */
-	void *proc_grp_opt;	/* group_optimization  */
-	void *proc_d_l1_down;	/* dynamic_l1_down     */
-	volatile diva_xdi_clear_interrupts_proc_t clear_interrupts_proc;
-	dword dsp_mask;
-} diva_os_xdi_adapter_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h
deleted file mode 100644
index 0646079..0000000
--- a/drivers/isdn/hardware/eicon/xdi_msg.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
-
-#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
-#define __DIVA_XDI_UM_CFG_MESSAGE_H__
-
-/*
-  Definition of messages used to communicate between
-  XDI device driver and user mode configuration utility
-*/
-
-/*
-  As acknowledge one DWORD - card ordinal will be read from the card
-*/
-#define DIVA_XDI_UM_CMD_GET_CARD_ORDINAL	0
-
-/*
-  no acknowledge will be generated, memory block will be written in the
-  memory at given offset
-*/
-#define DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK	1
-
-/*
-  no acknowledge will be generated, FPGA will be programmed
-*/
-#define DIVA_XDI_UM_CMD_WRITE_FPGA				2
-
-/*
-  As acknowledge block of SDRAM will be read in the user buffer
-*/
-#define DIVA_XDI_UM_CMD_READ_SDRAM				3
-
-/*
-  As acknowledge dword with serial number will be read in the user buffer
-*/
-#define DIVA_XDI_UM_CMD_GET_SERIAL_NR			4
-
-/*
-  As acknowledge struct consisting of 9 dwords with PCI info.
-  dword[0...7] = 8 PCI BARS
-  dword[9]		 = IRQ
-*/
-#define DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG	5
-
-/*
-  Reset of the board + activation of primary
-  boot loader
-*/
-#define DIVA_XDI_UM_CMD_RESET_ADAPTER			6
-
-/*
-  Called after code download to start adapter
-  at specified address
-  Start sets a new set of features because we do not know
-  if the protocol features have changed
-*/
-#define DIVA_XDI_UM_CMD_START_ADAPTER			7
-
-/*
-  Stop adapter, called if user
-  wishes to stop adapter without unload
-  of the driver, to reload adapter with
-  different protocol
-*/
-#define DIVA_XDI_UM_CMD_STOP_ADAPTER			8
-
-/*
-  Get state of current adapter
-  Acknowledge is one dword with following values:
-  0 - adapter ready for download
-  1 - adapter running
-  2 - adapter dead
-  3 - out of service, driver should be restarted or hardware problem
-*/
-#define DIVA_XDI_UM_CMD_GET_CARD_STATE		9
-
-/*
-  Reads XLOG entry from the card
-*/
-#define DIVA_XDI_UM_CMD_READ_XLOG_ENTRY		10
-
-/*
-  Set untranslated protocol code features
-*/
-#define DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES	11
-
-typedef struct _diva_xdi_um_cfg_cmd_data_set_features {
-	dword features;
-} diva_xdi_um_cfg_cmd_data_set_features_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_start {
-	dword offset;
-	dword features;
-} diva_xdi_um_cfg_cmd_data_start_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_write_sdram {
-	dword ram_number;
-	dword offset;
-	dword length;
-} diva_xdi_um_cfg_cmd_data_write_sdram_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_write_fpga {
-	dword fpga_number;
-	dword image_length;
-} diva_xdi_um_cfg_cmd_data_write_fpga_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_read_sdram {
-	dword ram_number;
-	dword offset;
-	dword length;
-} diva_xdi_um_cfg_cmd_data_read_sdram_t;
-
-typedef union _diva_xdi_um_cfg_cmd_data {
-	diva_xdi_um_cfg_cmd_data_write_sdram_t write_sdram;
-	diva_xdi_um_cfg_cmd_data_write_fpga_t write_fpga;
-	diva_xdi_um_cfg_cmd_data_read_sdram_t read_sdram;
-	diva_xdi_um_cfg_cmd_data_start_t start;
-	diva_xdi_um_cfg_cmd_data_set_features_t features;
-} diva_xdi_um_cfg_cmd_data_t;
-
-typedef struct _diva_xdi_um_cfg_cmd {
-	dword adapter;		/* Adapter number 1...N */
-	dword command;
-	diva_xdi_um_cfg_cmd_data_t command_data;
-	dword data_length;	/* Plain binary data will follow */
-} diva_xdi_um_cfg_cmd_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_vers.h b/drivers/isdn/hardware/eicon/xdi_vers.h
deleted file mode 100644
index b3479e5..0000000
--- a/drivers/isdn/hardware/eicon/xdi_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision :    2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_xdi_common_code_build[] = "102-52";
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 5acf6ab..6f60ace 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -52,10 +52,7 @@ static const struct w6692map  w6692_map[] =
 	{W6692_USR, "USR W6692"}
 };
 
-#ifndef PCI_VENDOR_ID_USR
-#define PCI_VENDOR_ID_USR	0x16ec
 #define PCI_DEVICE_ID_USR_6692	0x3409
-#endif
 
 struct w6692_ch {
 	struct bchannel		bch;
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index ea0e4c6..5b719b5 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -274,7 +274,7 @@ hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count
 	u_char *ptr, *ptr1, new_f2;
 	struct sk_buff *skb;
 	struct IsdnCardState *cs = bcs->cs;
-	int total, maxlen, new_z2;
+	int maxlen, new_z2;
 	z_type *zp;
 
 	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
@@ -297,7 +297,6 @@ hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count
 	} else if (!(skb = dev_alloc_skb(count - 3)))
 		printk(KERN_WARNING "HFCPCI: receive out of memory\n");
 	else {
-		total = count;
 		count -= 3;
 		ptr = skb_put(skb, count);
 
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ed68288..80af658 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -207,7 +207,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 		return PTR_ERR(peer_net);
 
 	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
-				&vxcan_link_ops, tbp);
+				&vxcan_link_ops, tbp, extack);
 	if (IS_ERR(peer)) {
 		put_net(peer_net);
 		return PTR_ERR(peer);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2eb6876..aa4a1f5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 		return ret;
 	}
 
+	ret = bcm_sf2_cfp_resume(ds);
+	if (ret)
+		return ret;
+
 	if (priv->hw_params.num_gphy == 1)
 		bcm_sf2_gphy_enable_set(ds, true);
 
@@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->indir_lock);
 	mutex_init(&priv->stats_mutex);
 	mutex_init(&priv->cfp.lock);
+	INIT_LIST_HEAD(&priv->cfp.rules_list);
 
 	/* CFP rule #0 cannot be used for specific classifications, flag it as
 	 * permanently used
@@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
 	ret = bcm_sf2_mdio_register(ds);
 	if (ret) {
 		pr_err("failed to register MDIO bus\n");
 		return ret;
 	}
 
+	bcm_sf2_gphy_enable_set(priv->dev->ds, false);
+
 	ret = bcm_sf2_cfp_rst(priv);
 	if (ret) {
 		pr_err("failed to reset CFP\n");
@@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 
 	priv->wol_ports_mask = 0;
 	dsa_unregister_switch(priv->dev->ds);
+	bcm_sf2_cfp_exit(priv->dev->ds);
 	/* Disable all ports and interrupts */
 	bcm_sf2_sw_suspend(priv->dev->ds);
 	bcm_sf2_mdio_unregister(priv);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index cc31e98..faaef32 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv {
 	DECLARE_BITMAP(used, CFP_NUM_RULES);
 	DECLARE_BITMAP(unique, CFP_NUM_RULES);
 	unsigned int rules_cnt;
+	struct list_head rules_list;
 };
 
 struct bcm_sf2_priv {
@@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
 int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
 		      struct ethtool_rxnfc *nfc);
 int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
 
 #endif /* __BCM_SF2_H */
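
The bcm_sf2.h hunk above adds a rules_list to the CFP state and exports
bcm_sf2_cfp_exit()/bcm_sf2_cfp_resume(), which the bcm_sf2.c hunks wire into
probe (INIT_LIST_HEAD), remove and resume. The net effect is a software
shadow of every installed CFP rule that can be replayed into the TCAM after
the switch loses its state across suspend. A minimal, self-contained sketch
of that shadow-and-replay pattern (plain C with illustrative names, not the
driver's API):

#include <stdio.h>
#include <stdlib.h>

struct rule {
	int port;
	unsigned int location;	/* TCAM slot the rule was programmed into */
	struct rule *next;
};

static struct rule *rules;	/* software shadow of what the hardware holds */

/* Remember a rule after it has been written to hardware. */
static int shadow_add(int port, unsigned int location)
{
	struct rule *r = calloc(1, sizeof(*r));

	if (!r)
		return -1;
	r->port = port;
	r->location = location;
	r->next = rules;
	rules = r;
	return 0;
}

/*
 * After the hardware lost its state (e.g. across a suspend/resume cycle),
 * walk the shadow list and program every remembered rule again.
 */
static void shadow_replay(void (*program)(int port, unsigned int location))
{
	struct rule *r;

	for (r = rules; r; r = r->next)
		program(r->port, r->location);
}

static void program_stub(int port, unsigned int location)
{
	printf("reprogram port %d, slot %u\n", port, location);
}

int main(void)
{
	shadow_add(0, 1);
	shadow_add(5, 2);
	shadow_replay(program_stub);	/* what bcm_sf2_cfp_resume() does, in spirit */
	return 0;
}
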
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f27..e14663a 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
 
+struct cfp_rule {
+	int port;
+	struct ethtool_rx_flow_spec fs;
+	struct list_head next;
+};
+
 struct cfp_udf_slice_layout {
 	u8 slices[UDFS_PER_SLICE];
 	u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
 	core_writel(priv, reg, offset);
 }
 
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+					      int port, u32 location)
+{
+	struct cfp_rule *rule = NULL;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		if (rule->port == port && rule->fs.location == location)
+			break;
+	}
+
+	return rule;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+				struct ethtool_rx_flow_spec *fs)
+{
+	struct cfp_rule *rule = NULL;
+	size_t fs_size = 0;
+	int ret = 1;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return ret;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		ret = 1;
+		if (rule->port != port)
+			continue;
+
+		if (rule->fs.flow_type != fs->flow_type ||
+		    rule->fs.ring_cookie != fs->ring_cookie ||
+		    rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+			continue;
+
+		switch (fs->flow_type & ~FLOW_EXT) {
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+			fs_size = sizeof(struct ethtool_tcpip6_spec);
+			break;
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+			fs_size = sizeof(struct ethtool_tcpip4_spec);
+			break;
+		default:
+			continue;
+		}
+
+		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+		if (ret == 0)
+			break;
+	}
+
+	return ret;
+}
+
 static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 				     unsigned int port_num,
 				     unsigned int queue_num,
@@ -728,27 +789,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	return ret;
 }
 
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
-				struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+				   struct ethtool_rx_flow_spec *fs)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	s8 cpu_port = ds->ports[port].cpu_dp->index;
 	__u64 ring_cookie = fs->ring_cookie;
 	unsigned int queue_num, port_num;
-	int ret = -EINVAL;
-
-	/* Check for unsupported extensions */
-	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
-	     fs->m_ext.data[1]))
-		return -EINVAL;
-
-	if (fs->location != RX_CLS_LOC_ANY &&
-	    test_bit(fs->location, priv->cfp.used))
-		return -EBUSY;
-
-	if (fs->location != RX_CLS_LOC_ANY &&
-	    fs->location > bcm_sf2_cfp_rule_size(priv))
-		return -EINVAL;
+	int ret;
 
 	/* This rule is a Wake-on-LAN filter and we must specifically
 	 * target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 						queue_num, fs);
 		break;
 	default:
+		ret = -EINVAL;
 		break;
 	}
 
 	return ret;
 }
 
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+				struct ethtool_rx_flow_spec *fs)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule = NULL;
+	int ret = -EINVAL;
+
+	/* Check for unsupported extensions */
+	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+	     fs->m_ext.data[1]))
+		return -EINVAL;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    test_bit(fs->location, priv->cfp.used))
+		return -EBUSY;
+
+	if (fs->location != RX_CLS_LOC_ANY &&
+	    fs->location > bcm_sf2_cfp_rule_size(priv))
+		return -EINVAL;
+
+	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+	if (ret == 0)
+		return -EEXIST;
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+	if (ret) {
+		kfree(rule);
+		return ret;
+	}
+
+	rule->port = port;
+	memcpy(&rule->fs, fs, sizeof(*fs));
+	list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+	return ret;
+}
+
 static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
 				    u32 loc, u32 *next_loc)
 {
@@ -830,12 +920,28 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
 	return 0;
 }
 
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
-				u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+				   u32 loc)
 {
 	u32 next_loc = 0;
 	int ret;
 
+	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
+	if (ret)
+		return ret;
+
+	/* If this was an IPv6 rule, delete its companion rule too */
+	if (next_loc)
+		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
+
+	return ret;
+}
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
+{
+	struct cfp_rule *rule;
+	int ret;
+
 	/* Refuse deleting unused rules, and those that are not unique since
 	 * that could leave IPv6 rules with one of the chained rule in the
 	 * table.
@@ -843,13 +949,14 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
 	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
 		return -EINVAL;
 
-	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
-	if (ret)
-		return ret;
+	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+	if (!rule)
+		return -EINVAL;
 
-	/* If this was an IPv6 rule, delete its companion rule too */
-	if (next_loc)
-		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
+	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
+
+	list_del(&rule->next);
+	kfree(rule);
 
 	return ret;
 }
@@ -867,305 +974,17 @@ static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
 	flow->m_ext.data[1] ^= cpu_to_be32(~0);
 }
 
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
-				    struct ethtool_tcpip4_spec *v4_spec,
-				    bool mask)
-{
-	u32 reg, offset, ipv4;
-	u16 src_dst_port;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-
-	reg = core_readl(priv, offset);
-	/* src port [15:8] */
-	src_dst_port = reg << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-
-	reg = core_readl(priv, offset);
-	/* src port [7:0] */
-	src_dst_port |= (reg >> 24);
-
-	v4_spec->pdst = cpu_to_be16(src_dst_port);
-	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
-	/* IPv4 dst [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-
-	reg = core_readl(priv, offset);
-	/* IPv4 dst [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	/* IPv4 dst [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	v4_spec->ip4dst = cpu_to_be32(ipv4);
-
-	/* IPv4 src [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-
-	/* Once the TCAM is programmed, the mask reflects the slice number
-	 * being matched, don't bother checking it when reading back the
-	 * mask spec
-	 */
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	/* IPv4 src [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	/* IPv4 src [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	v4_spec->ip4src = cpu_to_be32(ipv4);
-
-	return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
-				     struct ethtool_rx_flow_spec *fs)
-{
-	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V4_FLOW;
-		v4_spec = &fs->h_u.tcp_ip4_spec;
-		v4_m_spec = &fs->m_u.tcp_ip4_spec;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V4_FLOW;
-		v4_spec = &fs->h_u.udp_ip4_spec;
-		v4_m_spec = &fs->m_u.udp_ip4_spec;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
-	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
-	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
-				     __be32 *ip6_addr, __be16 *port,
-				     bool mask)
-{
-	u32 reg, tmp, offset;
-
-	/* C-Tag		[31:24]
-	 * UDF_n_B8		[23:8] (port)
-	 * UDF_n_B7 (upper)	[7:0] (addr[15:8])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(4);
-	else
-		offset = CORE_CFP_DATA_PORT(4);
-	reg = core_readl(priv, offset);
-	*port = cpu_to_be32(reg) >> 8;
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B7 (lower)	[31:24] (addr[7:0])
-	 * UDF_n_B6		[23:8] (addr[31:16])
-	 * UDF_n_B5 (upper)	[7:0] (addr[47:40])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[3] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B5 (lower)	[31:24] (addr[39:32])
-	 * UDF_n_B4		[23:8] (addr[63:48])
-	 * UDF_n_B3 (upper)	[7:0] (addr[79:72])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[2] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B3 (lower)	[31:24] (addr[71:64])
-	 * UDF_n_B2		[23:8] (addr[95:80])
-	 * UDF_n_B1 (upper)	[7:0] (addr[111:104])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[1] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B1 (lower)	[31:24] (addr[103:96])
-	 * UDF_n_B0		[23:8] (addr[127:112])
-	 * Reserved		[7:4]
-	 * Slice ID		[3:2]
-	 * Slice valid		[1:0]
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[0] = cpu_to_be32(tmp);
-
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
-				     struct ethtool_rx_flow_spec *fs,
-				     u32 next_loc)
-{
-	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
-	 * assuming tcp_ip6_spec here being an union.
-	 */
-	v6_spec = &fs->h_u.tcp_ip6_spec;
-	v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
-	/* Read the second half first */
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
-				       false);
-	if (ret)
-		return ret;
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
-				       &v6_m_spec->pdst, true);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations. We would not have the port enabled for this rule, so
-	 * don't bother checking it.
-	 */
-	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
-	/* The slice number is valid, so read the rule we are chained from now
-	 * which is our first half.
-	 */
-	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V6_FLOW;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V6_FLOW;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
-				       false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
-					&v6_m_spec->psrc, true);
-}
-
 static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
 				struct ethtool_rxnfc *nfc)
 {
-	u32 reg, ipv4_or_chain_id;
-	unsigned int queue_num;
-	int ret;
+	struct cfp_rule *rule;
 
-	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	/* Extract the destination port */
-	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
-				  DST_MAP_IB_MASK) - 1;
-
-	/* There is no Port 6, so we compensate for that here */
-	if (nfc->fs.ring_cookie >= 6)
-		nfc->fs.ring_cookie++;
-	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
-	/* Extract the destination queue */
-	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
-	nfc->fs.ring_cookie += queue_num;
-
-	/* Extract the L3_FRAMING or CHAIN_ID */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	/* With IPv6 rules this would contain a non-zero chain ID since
-	 * we reserve entry 0 and it cannot be used. So if we read 0 here
-	 * this means an IPv4 rule.
-	 */
-	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
-	if (ipv4_or_chain_id == 0)
-		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
-	else
-		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
-						ipv4_or_chain_id);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations
-	 */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
-	if (!(reg & 1 << port))
+	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+	if (!rule)
 		return -EINVAL;
 
+	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
 	bcm_sf2_invert_masks(&nfc->fs);
 
 	/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
 
 	return 0;
 }
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule, *n;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return;
+
+	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	struct cfp_rule *rule;
+	int ret = 0;
+	u32 reg;
+
+	if (list_empty(&priv->cfp.rules_list))
+		return ret;
+
+	reg = core_readl(priv, CORE_CFP_CTL_REG);
+	reg &= ~CFP_EN_MAP_MASK;
+	core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+	ret = bcm_sf2_cfp_rst(priv);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+					      rule->fs.location);
+		if (ret) {
+			dev_err(ds->dev, "failed to remove rule\n");
+			return ret;
+		}
+
+		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+		if (ret) {
+			dev_err(ds->dev, "failed to restore rule\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
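
With the full ethtool_rx_flow_spec now cached per rule, bcm_sf2_cfp_rule_get()
answers ethtool queries straight from the list (which is why the TCAM
read-back/unslice helpers above are deleted), and bcm_sf2_cfp_rule_cmp()
rejects duplicates by comparing only the portion of the spec that is
meaningful for the given flow type. A compact, self-contained sketch of that
"compare only the relevant union member" idea (simplified stand-in types,
illustrative only):

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the ethtool flow specs the driver compares. */
struct tcpip4_spec { unsigned int ip4src, ip4dst; unsigned short psrc, pdst; };
struct tcpip6_spec { unsigned int ip6src[4], ip6dst[4]; unsigned short psrc, pdst; };

struct flow_spec {
	int flow_type;		/* 4 or 6 here, for brevity */
	union {
		struct tcpip4_spec v4;
		struct tcpip6_spec v6;
	} h_u;			/* header fields to match */
	union {
		struct tcpip4_spec v4;
		struct tcpip6_spec v6;
	} m_u;			/* mask */
};

/*
 * Returns 0 when the two rules describe the same match; only the union
 * member selected by flow_type is compared, so the unused member and any
 * trailing bytes are ignored.
 */
static int flow_spec_cmp(const struct flow_spec *a, const struct flow_spec *b)
{
	size_t size;

	if (a->flow_type != b->flow_type)
		return 1;

	switch (a->flow_type) {
	case 4:
		size = sizeof(struct tcpip4_spec);
		break;
	case 6:
		size = sizeof(struct tcpip6_spec);
		break;
	default:
		return 1;
	}

	return memcmp(&a->h_u, &b->h_u, size) | memcmp(&a->m_u, &b->m_u, size);
}

int main(void)
{
	struct flow_spec a = { .flow_type = 4 }, b = { .flow_type = 4 };

	a.h_u.v4.ip4dst = b.h_u.v4.ip4dst = 0x0a000001;
	printf("duplicate: %s\n", flow_spec_cmp(&a, &b) == 0 ? "yes" : "no");
	return 0;
}
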
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index a8b8f59..a8caf92 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,12 +1,16 @@
-menuconfig MICROCHIP_KSZ
-	tristate "Microchip KSZ series switch support"
+config NET_DSA_MICROCHIP_KSZ_COMMON
+	tristate
+
+menuconfig NET_DSA_MICROCHIP_KSZ9477
+	tristate "Microchip KSZ9477 series switch support"
 	depends on NET_DSA
 	select NET_DSA_TAG_KSZ
+	select NET_DSA_MICROCHIP_KSZ_COMMON
 	help
-	  This driver adds support for Microchip KSZ switch chips.
+	  This driver adds support for Microchip KSZ9477 switch chips.
 
-config MICROCHIP_KSZ_SPI_DRIVER
-	tristate "KSZ series SPI connected switch driver"
-	depends on MICROCHIP_KSZ && SPI
+config NET_DSA_MICROCHIP_KSZ9477_SPI
+	tristate "KSZ9477 series SPI connected switch driver"
+	depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
 	help
 	  Select to enable support for registering switches configured through SPI.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index ed335e2..3142c18 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_MICROCHIP_KSZ)	        += ksz_common.o
-obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER)	+= ksz_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON)	+= ksz_common.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477)		+= ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI)	+= ksz9477_spi.o
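
The new ksz9477.c that follows drives the switch's VLAN table through an
indexed register window: write the entry index, issue a read or write
command, then poll a busy flag with a bounded loop
(ksz9477_wait_vlan_ctrl_ready()). A self-contained sketch of that bounded
poll shape, with a fake register read standing in for the driver's ksz_read8()
accessor and without the usleep between polls (illustrative only):

#include <stdio.h>

#define VLAN_START	0x80	/* illustrative busy bit */

/* Fake register read; pretend the hardware finishes after three polls. */
static unsigned char fake_read_ctrl(void)
{
	static int countdown = 3;

	return countdown-- > 0 ? VLAN_START : 0;
}

/*
 * Poll until the busy bit clears or the retry budget runs out; this mirrors
 * the shape of ksz9477_wait_vlan_ctrl_ready(), which returns -ETIMEDOUT in
 * the driver instead of -1.
 */
static int wait_ctrl_ready(unsigned char busy_bit, int timeout)
{
	do {
		if (!(fake_read_ctrl() & busy_bit))
			return 0;
	} while (timeout-- > 0);

	return -1;
}

int main(void)
{
	printf("vlan ctrl ready: %d\n", wait_ctrl_ready(VLAN_START, 1000));
	return 0;
}
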
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
new file mode 100644
index 0000000..0684657
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 switch driver main logic
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_priv.h"
+#include "ksz_common.h"
+#include "ksz9477_reg.h"
+
+static const struct {
+	int index;
+	char string[ETH_GSTRING_LEN];
+} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+	{ 0x00, "rx_hi" },
+	{ 0x01, "rx_undersize" },
+	{ 0x02, "rx_fragments" },
+	{ 0x03, "rx_oversize" },
+	{ 0x04, "rx_jabbers" },
+	{ 0x05, "rx_symbol_err" },
+	{ 0x06, "rx_crc_err" },
+	{ 0x07, "rx_align_err" },
+	{ 0x08, "rx_mac_ctrl" },
+	{ 0x09, "rx_pause" },
+	{ 0x0A, "rx_bcast" },
+	{ 0x0B, "rx_mcast" },
+	{ 0x0C, "rx_ucast" },
+	{ 0x0D, "rx_64_or_less" },
+	{ 0x0E, "rx_65_127" },
+	{ 0x0F, "rx_128_255" },
+	{ 0x10, "rx_256_511" },
+	{ 0x11, "rx_512_1023" },
+	{ 0x12, "rx_1024_1522" },
+	{ 0x13, "rx_1523_2000" },
+	{ 0x14, "rx_2001" },
+	{ 0x15, "tx_hi" },
+	{ 0x16, "tx_late_col" },
+	{ 0x17, "tx_pause" },
+	{ 0x18, "tx_bcast" },
+	{ 0x19, "tx_mcast" },
+	{ 0x1A, "tx_ucast" },
+	{ 0x1B, "tx_deferred" },
+	{ 0x1C, "tx_total_col" },
+	{ 0x1D, "tx_exc_col" },
+	{ 0x1E, "tx_single_col" },
+	{ 0x1F, "tx_mult_col" },
+	{ 0x80, "rx_total" },
+	{ 0x81, "tx_total" },
+	{ 0x82, "rx_discards" },
+	{ 0x83, "tx_discards" },
+};
+
+static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+	u32 data;
+
+	ksz_read32(dev, addr, &data);
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+	ksz_write32(dev, addr, data);
+}
+
+static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
+			       u32 bits, bool set)
+{
+	u32 addr;
+	u32 data;
+
+	addr = PORT_CTRL_ADDR(port, offset);
+	ksz_read32(dev, addr, &data);
+
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+
+	ksz_write32(dev, addr, data);
+}
+
+static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton,
+					int timeout)
+{
+	u8 data;
+
+	do {
+		ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
+		if (!(data & waiton))
+			break;
+		usleep_range(1, 10);
+	} while (timeout-- > 0);
+
+	if (timeout <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
+				  u32 *vlan_table)
+{
+	int ret;
+
+	mutex_lock(&dev->vlan_mutex);
+
+	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
+
+	/* wait to be cleared */
+	ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+	if (ret < 0) {
+		dev_dbg(dev->dev, "Failed to read vlan table\n");
+		goto exit;
+	}
+
+	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
+	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
+	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
+
+	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+exit:
+	mutex_unlock(&dev->vlan_mutex);
+
+	return ret;
+}
+
+static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
+				  u32 *vlan_table)
+{
+	int ret;
+
+	mutex_lock(&dev->vlan_mutex);
+
+	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
+	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
+	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
+
+	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
+
+	/* wait to be cleared */
+	ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+	if (ret < 0) {
+		dev_dbg(dev->dev, "Failed to write vlan table\n");
+		goto exit;
+	}
+
+	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+	/* update vlan cache table */
+	dev->vlan_cache[vid].table[0] = vlan_table[0];
+	dev->vlan_cache[vid].table[1] = vlan_table[1];
+	dev->vlan_cache[vid].table[2] = vlan_table[2];
+
+exit:
+	mutex_unlock(&dev->vlan_mutex);
+
+	return ret;
+}
+
+static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
+{
+	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
+	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
+	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
+	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
+}
+
+static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
+{
+	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
+	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
+	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
+	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
+}
+
+static int ksz9477_wait_alu_ready(struct ksz_device *dev, u32 waiton,
+				  int timeout)
+{
+	u32 data;
+
+	do {
+		ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
+		if (!(data & waiton))
+			break;
+		usleep_range(1, 10);
+	} while (timeout-- > 0);
+
+	if (timeout <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev, u32 waiton,
+				      int timeout)
+{
+	u32 data;
+
+	do {
+		ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
+		if (!(data & waiton))
+			break;
+		usleep_range(1, 10);
+	} while (timeout-- > 0);
+
+	if (timeout <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int ksz9477_reset_switch(struct ksz_device *dev)
+{
+	u8 data8;
+	u16 data16;
+	u32 data32;
+
+	/* reset switch */
+	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+
+	/* turn off SPI DO Edge select */
+	ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+	data8 &= ~SPI_AUTO_EDGE_DETECTION;
+	ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+
+	/* default configuration */
+	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+	/* disable interrupts */
+	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
+	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+
+	/* set broadcast storm protection 10% rate */
+	ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
+	data16 &= ~BROADCAST_STORM_RATE;
+	data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+	ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
+
+	return 0;
+}
+
+static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
+						      int port)
+{
+	return DSA_TAG_PROTO_KSZ;
+}
+
+static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+	struct ksz_device *dev = ds->priv;
+	u16 val = 0xffff;
+
+	/* No real PHY after this. Simulate the PHY.
+	 * A fixed PHY can be set up in the device tree, but this function is
+	 * still called for that port during initialization.
+	 * For an RGMII PHY there is no way to access it, so the fixed PHY
+	 * should be used.  For SGMII the supporting code will be added later.
+	 */
+	if (addr >= dev->phy_port_cnt) {
+		struct ksz_port *p = &dev->ports[addr];
+
+		switch (reg) {
+		case MII_BMCR:
+			val = 0x1140;
+			break;
+		case MII_BMSR:
+			val = 0x796d;
+			break;
+		case MII_PHYSID1:
+			val = 0x0022;
+			break;
+		case MII_PHYSID2:
+			val = 0x1631;
+			break;
+		case MII_ADVERTISE:
+			val = 0x05e1;
+			break;
+		case MII_LPA:
+			val = 0xc5e1;
+			break;
+		case MII_CTRL1000:
+			val = 0x0700;
+			break;
+		case MII_STAT1000:
+			if (p->phydev.speed == SPEED_1000)
+				val = 0x3800;
+			else
+				val = 0;
+			break;
+		}
+	} else {
+		ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+	}
+
+	return val;
+}
+
+static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
+			       u16 val)
+{
+	struct ksz_device *dev = ds->priv;
+
+	/* No real PHY after this. */
+	if (addr >= dev->phy_port_cnt)
+		return 0;
+	ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+
+	return 0;
+}
+
+static void ksz9477_get_strings(struct dsa_switch *ds, int port,
+				u32 stringset, uint8_t *buf)
+{
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+		memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
+		       ETH_GSTRING_LEN);
+	}
+}
+
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+				  uint64_t *buf)
+{
+	struct ksz_device *dev = ds->priv;
+	int i;
+	u32 data;
+	int timeout;
+
+	mutex_lock(&dev->stats_mutex);
+
+	for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+		data = MIB_COUNTER_READ;
+		data |= ((ksz9477_mib_names[i].index & 0xFF) <<
+			MIB_COUNTER_INDEX_S);
+		ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+		timeout = 1000;
+		do {
+			ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+				    &data);
+			usleep_range(1, 10);
+			if (!(data & MIB_COUNTER_READ))
+				break;
+		} while (timeout-- > 0);
+
+		/* failed to read MIB. get out of loop */
+		if (!timeout) {
+			dev_dbg(dev->dev, "Failed to get MIB\n");
+			break;
+		}
+
+		/* count resets upon read */
+		ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+
+		dev->mib_value[i] += (uint64_t)data;
+		buf[i] = dev->mib_value[i];
+	}
+
+	mutex_unlock(&dev->stats_mutex);
+}
+
+static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
+				    u8 member)
+{
+	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
+	dev->ports[port].member = member;
+}
+
+static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
+				       u8 state)
+{
+	struct ksz_device *dev = ds->priv;
+	struct ksz_port *p = &dev->ports[port];
+	u8 data;
+	int member = -1;
+
+	ksz_pread8(dev, port, P_STP_CTRL, &data);
+	data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+	switch (state) {
+	case BR_STATE_DISABLED:
+		data |= PORT_LEARN_DISABLE;
+		if (port != dev->cpu_port)
+			member = 0;
+		break;
+	case BR_STATE_LISTENING:
+		data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+		if (port != dev->cpu_port &&
+		    p->stp_state == BR_STATE_DISABLED)
+			member = dev->host_mask | p->vid_member;
+		break;
+	case BR_STATE_LEARNING:
+		data |= PORT_RX_ENABLE;
+		break;
+	case BR_STATE_FORWARDING:
+		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+
+		/* This function is also used internally. */
+		if (port == dev->cpu_port)
+			break;
+
+		member = dev->host_mask | p->vid_member;
+
+		/* Port is a member of a bridge. */
+		if (dev->br_member & (1 << port)) {
+			dev->member |= (1 << port);
+			member = dev->member;
+		}
+		break;
+	case BR_STATE_BLOCKING:
+		data |= PORT_LEARN_DISABLE;
+		if (port != dev->cpu_port &&
+		    p->stp_state == BR_STATE_DISABLED)
+			member = dev->host_mask | p->vid_member;
+		break;
+	default:
+		dev_err(ds->dev, "invalid STP state: %d\n", state);
+		return;
+	}
+
+	ksz_pwrite8(dev, port, P_STP_CTRL, data);
+	p->stp_state = state;
+	if (data & PORT_RX_ENABLE)
+		dev->rx_ports |= (1 << port);
+	else
+		dev->rx_ports &= ~(1 << port);
+	if (data & PORT_TX_ENABLE)
+		dev->tx_ports |= (1 << port);
+	else
+		dev->tx_ports &= ~(1 << port);
+
+	/* Port membership may share register with STP state. */
+	if (member >= 0 && member != p->member)
+		ksz9477_cfg_port_member(dev, port, (u8)member);
+
+	/* Check if forwarding needs to be updated. */
+	if (state != BR_STATE_FORWARDING) {
+		if (dev->br_member & (1 << port))
+			dev->member &= ~(1 << port);
+	}
+
+	/* When the topology has changed, the function ksz_update_port_member
+	 * should be called to modify port forwarding behavior.  However, as
+	 * the offload_fwd_mark indication cannot be reported here, the
+	 * switch forwarding function is not enabled.
+	 */
+}
+
+static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+	u8 data;
+
+	ksz_read8(dev, REG_SW_LUE_CTRL_2, &data);
+	data &= ~(SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S);
+	data |= (SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
+	ksz_write8(dev, REG_SW_LUE_CTRL_2, data);
+	if (port < dev->mib_port_cnt) {
+		/* flush individual port */
+		ksz_pread8(dev, port, P_STP_CTRL, &data);
+		if (!(data & PORT_LEARN_DISABLE))
+			ksz_pwrite8(dev, port, P_STP_CTRL,
+				    data | PORT_LEARN_DISABLE);
+		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+		ksz_pwrite8(dev, port, P_STP_CTRL, data);
+	} else {
+		/* flush all */
+		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
+	}
+}
+
+static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
+				       bool flag)
+{
+	struct ksz_device *dev = ds->priv;
+
+	if (flag) {
+		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+			     PORT_VLAN_LOOKUP_VID_0, true);
+		ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+			      true);
+		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
+	} else {
+		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
+		ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+			      false);
+		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+			     PORT_VLAN_LOOKUP_VID_0, false);
+	}
+
+	return 0;
+}
+
+static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+				  const struct switchdev_obj_port_vlan *vlan)
+{
+	struct ksz_device *dev = ds->priv;
+	u32 vlan_table[3];
+	u16 vid;
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+		if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+			dev_dbg(dev->dev, "Failed to get vlan table\n");
+			return;
+		}
+
+		vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
+		if (untagged)
+			vlan_table[1] |= BIT(port);
+		else
+			vlan_table[1] &= ~BIT(port);
+		vlan_table[1] &= ~(BIT(dev->cpu_port));
+
+		vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+
+		if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+			dev_dbg(dev->dev, "Failed to set vlan table\n");
+			return;
+		}
+
+		/* change PVID */
+		if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+			ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+	}
+}
+
+static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
+				 const struct switchdev_obj_port_vlan *vlan)
+{
+	struct ksz_device *dev = ds->priv;
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	u32 vlan_table[3];
+	u16 vid;
+	u16 pvid;
+
+	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
+	pvid = pvid & 0xFFF;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+		if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+			dev_dbg(dev->dev, "Failed to get vlan table\n");
+			return -ETIMEDOUT;
+		}
+
+		vlan_table[2] &= ~BIT(port);
+
+		if (pvid == vid)
+			pvid = 1;
+
+		if (untagged)
+			vlan_table[1] &= ~BIT(port);
+
+		if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+			dev_dbg(dev->dev, "Failed to set vlan table\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
+
+	return 0;
+}
+
+static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid)
+{
+	struct ksz_device *dev = ds->priv;
+	u32 alu_table[4];
+	u32 data;
+	int ret = 0;
+
+	mutex_lock(&dev->alu_mutex);
+
+	/* find any entry with mac & vid */
+	data = vid << ALU_FID_INDEX_S;
+	data |= ((addr[0] << 8) | addr[1]);
+	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+	data = ((addr[2] << 24) | (addr[3] << 16));
+	data |= ((addr[4] << 8) | addr[5]);
+	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+	/* start read operation */
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+	/* wait to be finished */
+	ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+	if (ret < 0) {
+		dev_dbg(dev->dev, "Failed to read ALU\n");
+		goto exit;
+	}
+
+	/* read ALU entry */
+	ksz9477_read_table(dev, alu_table);
+
+	/* update ALU entry */
+	alu_table[0] = ALU_V_STATIC_VALID;
+	alu_table[1] |= BIT(port);
+	if (vid)
+		alu_table[1] |= ALU_V_USE_FID;
+	alu_table[2] = (vid << ALU_V_FID_S);
+	alu_table[2] |= ((addr[0] << 8) | addr[1]);
+	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
+	alu_table[3] |= ((addr[4] << 8) | addr[5]);
+
+	ksz9477_write_table(dev, alu_table);
+
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+	/* wait to be finished */
+	ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+	if (ret < 0)
+		dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+	mutex_unlock(&dev->alu_mutex);
+
+	return ret;
+}
+
+static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid)
+{
+	struct ksz_device *dev = ds->priv;
+	u32 alu_table[4];
+	u32 data;
+	int ret = 0;
+
+	mutex_lock(&dev->alu_mutex);
+
+	/* read any entry with mac & vid */
+	data = vid << ALU_FID_INDEX_S;
+	data |= ((addr[0] << 8) | addr[1]);
+	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+	data = ((addr[2] << 24) | (addr[3] << 16));
+	data |= ((addr[4] << 8) | addr[5]);
+	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+	/* start read operation */
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+	/* wait to be finished */
+	ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+	if (ret < 0) {
+		dev_dbg(dev->dev, "Failed to read ALU\n");
+		goto exit;
+	}
+
+	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
+	if (alu_table[0] & ALU_V_STATIC_VALID) {
+		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
+		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
+		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+		/* clear forwarding port */
+		alu_table[2] &= ~BIT(port);
+
+		/* if there is no port to forward, clear table */
+		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+			alu_table[0] = 0;
+			alu_table[1] = 0;
+			alu_table[2] = 0;
+			alu_table[3] = 0;
+		}
+	} else {
+		alu_table[0] = 0;
+		alu_table[1] = 0;
+		alu_table[2] = 0;
+		alu_table[3] = 0;
+	}
+
+	ksz9477_write_table(dev, alu_table);
+
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+	/* wait to be finished */
+	ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+	if (ret < 0)
+		dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+	mutex_unlock(&dev->alu_mutex);
+
+	return ret;
+}
+
+static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
+{
+	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
+	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
+	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
+	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
+			ALU_V_PRIO_AGE_CNT_M;
+	alu->mstp = alu_table[0] & ALU_V_MSTP_M;
+
+	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
+	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
+	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
+
+	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
+
+	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
+	alu->mac[1] = alu_table[2] & 0xFF;
+	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
+	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
+	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
+	alu->mac[5] = alu_table[3] & 0xFF;
+}
+
+static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
+				 dsa_fdb_dump_cb_t *cb, void *data)
+{
+	struct ksz_device *dev = ds->priv;
+	int ret = 0;
+	u32 ksz_data;
+	u32 alu_table[4];
+	struct alu_struct alu;
+	int timeout;
+
+	mutex_lock(&dev->alu_mutex);
+
+	/* start ALU search */
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
+
+	do {
+		timeout = 1000;
+		do {
+			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
+			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
+				break;
+			usleep_range(1, 10);
+		} while (timeout-- > 0);
+
+		if (!timeout) {
+			dev_dbg(dev->dev, "Failed to search ALU\n");
+			ret = -ETIMEDOUT;
+			goto exit;
+		}
+
+		/* read ALU table */
+		ksz9477_read_table(dev, alu_table);
+
+		ksz9477_convert_alu(&alu, alu_table);
+
+		if (alu.port_forward & BIT(port)) {
+			ret = cb(alu.mac, alu.fid, alu.is_static, data);
+			if (ret)
+				goto exit;
+		}
+	} while (ksz_data & ALU_START);
+
+exit:
+
+	/* stop ALU search */
+	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
+
+	mutex_unlock(&dev->alu_mutex);
+
+	return ret;
+}
+
+static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+				 const struct switchdev_obj_port_mdb *mdb)
+{
+	struct ksz_device *dev = ds->priv;
+	u32 static_table[4];
+	u32 data;
+	int index;
+	u32 mac_hi, mac_lo;
+
+	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+	mutex_lock(&dev->alu_mutex);
+
+	for (index = 0; index < dev->num_statics; index++) {
+		/* find empty slot first */
+		data = (index << ALU_STAT_INDEX_S) |
+			ALU_STAT_READ | ALU_STAT_START;
+		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+		/* wait to be finished */
+		if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
+			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+			goto exit;
+		}
+
+		/* read ALU static table */
+		ksz9477_read_table(dev, static_table);
+
+		if (static_table[0] & ALU_V_STATIC_VALID) {
+			/* check this has same vid & mac address */
+			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+			    static_table[3] == mac_lo) {
+				/* found matching one */
+				break;
+			}
+		} else {
+			/* found empty one */
+			break;
+		}
+	}
+
+	/* no available entry */
+	if (index == dev->num_statics)
+		goto exit;
+
+	/* add entry */
+	static_table[0] = ALU_V_STATIC_VALID;
+	static_table[1] |= BIT(port);
+	if (mdb->vid)
+		static_table[1] |= ALU_V_USE_FID;
+	static_table[2] = (mdb->vid << ALU_V_FID_S);
+	static_table[2] |= mac_hi;
+	static_table[3] = mac_lo;
+
+	ksz9477_write_table(dev, static_table);
+
+	data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+	/* wait to be finished */
+	if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
+		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+	mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
+				const struct switchdev_obj_port_mdb *mdb)
+{
+	struct ksz_device *dev = ds->priv;
+	u32 static_table[4];
+	u32 data;
+	int index;
+	int ret = 0;
+	u32 mac_hi, mac_lo;
+
+	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+	mutex_lock(&dev->alu_mutex);
+
+	for (index = 0; index < dev->num_statics; index++) {
+		/* find empty slot first */
+		data = (index << ALU_STAT_INDEX_S) |
+			ALU_STAT_READ | ALU_STAT_START;
+		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+		/* wait to be finished */
+		ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+		if (ret < 0) {
+			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+			goto exit;
+		}
+
+		/* read ALU static table */
+		ksz9477_read_table(dev, static_table);
+
+		if (static_table[0] & ALU_V_STATIC_VALID) {
+			/* check this has same vid & mac address */
+
+			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+			    static_table[3] == mac_lo) {
+				/* found matching one */
+				break;
+			}
+		}
+	}
+
+	/* no available entry */
+	if (index == dev->num_statics)
+		goto exit;
+
+	/* clear port */
+	static_table[1] &= ~BIT(port);
+
+	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
+		/* delete entry */
+		static_table[0] = 0;
+		static_table[1] = 0;
+		static_table[2] = 0;
+		static_table[3] = 0;
+	}
+
+	ksz9477_write_table(dev, static_table);
+
+	data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+	/* wait to be finished */
+	ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+	if (ret < 0)
+		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+	mutex_unlock(&dev->alu_mutex);
+
+	return ret;
+}
+
+static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
+				   struct dsa_mall_mirror_tc_entry *mirror,
+				   bool ingress)
+{
+	struct ksz_device *dev = ds->priv;
+
+	if (ingress)
+		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+	else
+		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+
+	ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+	/* configure mirror port */
+	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+		     PORT_MIRROR_SNIFFER, true);
+
+	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+	return 0;
+}
+
+static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
+				    struct dsa_mall_mirror_tc_entry *mirror)
+{
+	struct ksz_device *dev = ds->priv;
+	u8 data;
+
+	if (mirror->ingress)
+		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+	else
+		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+
+	ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+	if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+			     PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+	u8 data8;
+	u8 member;
+	u16 data16;
+	struct ksz_port *p = &dev->ports[port];
+
+	/* enable tag tail for host port */
+	if (cpu_port)
+		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
+			     true);
+
+	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
+
+	/* set back pressure */
+	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
+
+	/* enable broadcast storm limit */
+	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+	/* disable DiffServ priority */
+	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
+
+	/* replace priority */
+	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
+		     false);
+	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
+			   MTI_PVID_REPLACE, false);
+
+	/* enable 802.1p priority */
+	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+	if (port < dev->phy_port_cnt) {
+		/* do not force flow control */
+		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+			     false);
+
+	} else {
+		/* force flow control */
+		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+			     true);
+
+		/* configure MAC to 1G & RGMII mode */
+		ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+		data8 &= ~PORT_MII_NOT_1GBIT;
+		data8 &= ~PORT_MII_SEL_M;
+		switch (dev->interface) {
+		case PHY_INTERFACE_MODE_MII:
+			data8 |= PORT_MII_NOT_1GBIT;
+			data8 |= PORT_MII_SEL;
+			p->phydev.speed = SPEED_100;
+			break;
+		case PHY_INTERFACE_MODE_RMII:
+			data8 |= PORT_MII_NOT_1GBIT;
+			data8 |= PORT_RMII_SEL;
+			p->phydev.speed = SPEED_100;
+			break;
+		case PHY_INTERFACE_MODE_GMII:
+			data8 |= PORT_GMII_SEL;
+			p->phydev.speed = SPEED_1000;
+			break;
+		default:
+			data8 &= ~PORT_RGMII_ID_IG_ENABLE;
+			data8 &= ~PORT_RGMII_ID_EG_ENABLE;
+			if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+			    dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+				data8 |= PORT_RGMII_ID_IG_ENABLE;
+			if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+			    dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+				data8 |= PORT_RGMII_ID_EG_ENABLE;
+			data8 |= PORT_RGMII_SEL;
+			p->phydev.speed = SPEED_1000;
+			break;
+		}
+		ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
+		p->phydev.duplex = 1;
+	}
+	if (cpu_port) {
+		member = dev->port_mask;
+		dev->on_ports = dev->host_mask;
+		dev->live_ports = dev->host_mask;
+	} else {
+		member = dev->host_mask | p->vid_member;
+		dev->on_ports |= (1 << port);
+
+		/* Link was detected before the port is enabled. */
+		if (p->phydev.link)
+			dev->live_ports |= (1 << port);
+	}
+	ksz9477_cfg_port_member(dev, port, member);
+
+	/* clear pending interrupts */
+	if (port < dev->phy_port_cnt)
+		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+}
+
+static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+{
+	struct ksz_device *dev = ds->priv;
+	struct ksz_port *p;
+	int i;
+
+	ds->num_ports = dev->port_cnt;
+
+	for (i = 0; i < dev->port_cnt; i++) {
+		if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+			dev->cpu_port = i;
+			dev->host_mask = (1 << dev->cpu_port);
+			dev->port_mask |= dev->host_mask;
+
+			/* enable cpu port */
+			ksz9477_port_setup(dev, i, true);
+			p = &dev->ports[dev->cpu_port];
+			p->vid_member = dev->port_mask;
+			p->on = 1;
+		}
+	}
+
+	dev->member = dev->host_mask;
+
+	for (i = 0; i < dev->mib_port_cnt; i++) {
+		if (i == dev->cpu_port)
+			continue;
+		p = &dev->ports[i];
+
+		/* Initialize to non-zero so that ksz_cfg_port_member() will
+		 * be called.
+		 */
+		p->vid_member = (1 << i);
+		p->member = dev->port_mask;
+		ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+		p->on = 1;
+		if (i < dev->phy_port_cnt)
+			p->phy = 1;
+		if (dev->chip_id == 0x00947700 && i == 6) {
+			p->sgmii = 1;
+
+			/* SGMII PHY detection code is not implemented yet. */
+			p->phy = 0;
+		}
+	}
+}
+
+static int ksz9477_setup(struct dsa_switch *ds)
+{
+	struct ksz_device *dev = ds->priv;
+	int ret = 0;
+
+	dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+				       dev->num_vlans, GFP_KERNEL);
+	if (!dev->vlan_cache)
+		return -ENOMEM;
+
+	ret = ksz9477_reset_switch(dev);
+	if (ret) {
+		dev_err(ds->dev, "failed to reset switch\n");
+		return ret;
+	}
+
+	/* accept packets up to 2000 bytes */
+	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+
+	ksz9477_config_cpu_port(ds);
+
+	ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+
+	/* queue based egress rate limit */
+	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+
+	/* start switch */
+	ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
+
+	return 0;
+}
+
+static const struct dsa_switch_ops ksz9477_switch_ops = {
+	.get_tag_protocol	= ksz9477_get_tag_protocol,
+	.setup			= ksz9477_setup,
+	.phy_read		= ksz9477_phy_read16,
+	.phy_write		= ksz9477_phy_write16,
+	.port_enable		= ksz_enable_port,
+	.port_disable		= ksz_disable_port,
+	.get_strings		= ksz9477_get_strings,
+	.get_ethtool_stats	= ksz_get_ethtool_stats,
+	.get_sset_count		= ksz_sset_count,
+	.port_bridge_join	= ksz_port_bridge_join,
+	.port_bridge_leave	= ksz_port_bridge_leave,
+	.port_stp_state_set	= ksz9477_port_stp_state_set,
+	.port_fast_age		= ksz_port_fast_age,
+	.port_vlan_filtering	= ksz9477_port_vlan_filtering,
+	.port_vlan_prepare	= ksz_port_vlan_prepare,
+	.port_vlan_add		= ksz9477_port_vlan_add,
+	.port_vlan_del		= ksz9477_port_vlan_del,
+	.port_fdb_dump		= ksz9477_port_fdb_dump,
+	.port_fdb_add		= ksz9477_port_fdb_add,
+	.port_fdb_del		= ksz9477_port_fdb_del,
+	.port_mdb_prepare       = ksz_port_mdb_prepare,
+	.port_mdb_add           = ksz9477_port_mdb_add,
+	.port_mdb_del           = ksz9477_port_mdb_del,
+	.port_mirror_add	= ksz9477_port_mirror_add,
+	.port_mirror_del	= ksz9477_port_mirror_del,
+};
+
+static u32 ksz9477_get_port_addr(int port, int offset)
+{
+	return PORT_CTRL_ADDR(port, offset);
+}
+
+static int ksz9477_switch_detect(struct ksz_device *dev)
+{
+	u8 data8;
+	u32 id32;
+	int ret;
+
+	/* turn off SPI DO Edge select */
+	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+	if (ret)
+		return ret;
+
+	data8 &= ~SPI_AUTO_EDGE_DETECTION;
+	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+	if (ret)
+		return ret;
+
+	/* read chip id */
+	ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+	if (ret)
+		return ret;
+
+	/* Number of ports can be reduced depending on chip. */
+	dev->mib_port_cnt = TOTAL_PORT_NUM;
+	dev->phy_port_cnt = 5;
+
+	dev->chip_id = id32;
+
+	return 0;
+}
+
+struct ksz_chip_data {
+	u32 chip_id;
+	const char *dev_name;
+	int num_vlans;
+	int num_alus;
+	int num_statics;
+	int cpu_ports;
+	int port_cnt;
+};
+
+static const struct ksz_chip_data ksz9477_switch_chips[] = {
+	{
+		.chip_id = 0x00947700,
+		.dev_name = "KSZ9477",
+		.num_vlans = 4096,
+		.num_alus = 4096,
+		.num_statics = 16,
+		.cpu_ports = 0x7F,	/* can be configured as cpu port */
+		.port_cnt = 7,		/* total physical port count */
+	},
+	{
+		.chip_id = 0x00989700,
+		.dev_name = "KSZ9897",
+		.num_vlans = 4096,
+		.num_alus = 4096,
+		.num_statics = 16,
+		.cpu_ports = 0x7F,	/* can be configured as cpu port */
+		.port_cnt = 7,		/* total physical port count */
+	},
+};
+
+static int ksz9477_switch_init(struct ksz_device *dev)
+{
+	int i;
+
+	dev->ds->ops = &ksz9477_switch_ops;
+
+	for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
+		const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
+
+		if (dev->chip_id == chip->chip_id) {
+			dev->name = chip->dev_name;
+			dev->num_vlans = chip->num_vlans;
+			dev->num_alus = chip->num_alus;
+			dev->num_statics = chip->num_statics;
+			dev->port_cnt = chip->port_cnt;
+			dev->cpu_ports = chip->cpu_ports;
+
+			break;
+		}
+	}
+
+	/* no switch found */
+	if (!dev->port_cnt)
+		return -ENODEV;
+
+	dev->port_mask = (1 << dev->port_cnt) - 1;
+
+	dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
+	dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+
+	i = dev->mib_port_cnt;
+	dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+				  GFP_KERNEL);
+	if (!dev->ports)
+		return -ENOMEM;
+	for (i = 0; i < dev->mib_port_cnt; i++) {
+		dev->ports[i].mib.counters =
+			devm_kzalloc(dev->dev,
+				     sizeof(u64) *
+				     (TOTAL_SWITCH_COUNTER_NUM + 1),
+				     GFP_KERNEL);
+		if (!dev->ports[i].mib.counters)
+			return -ENOMEM;
+	}
+	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+
+	return 0;
+}
+
+static void ksz9477_switch_exit(struct ksz_device *dev)
+{
+	ksz9477_reset_switch(dev);
+}
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+	.get_port_addr = ksz9477_get_port_addr,
+	.cfg_port_member = ksz9477_cfg_port_member,
+	.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+	.port_setup = ksz9477_port_setup,
+	.shutdown = ksz9477_reset_switch,
+	.detect = ksz9477_switch_detect,
+	.init = ksz9477_switch_init,
+	.exit = ksz9477_switch_exit,
+};
+
+int ksz9477_switch_register(struct ksz_device *dev)
+{
+	return ksz_switch_register(dev, &ksz9477_dev_ops);
+}
+EXPORT_SYMBOL(ksz9477_switch_register);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
similarity index 98%
rename from drivers/net/dsa/microchip/ksz_9477_reg.h
rename to drivers/net/dsa/microchip/ksz9477_reg.h
index 6aa6752..2938e89 100644
--- a/drivers/net/dsa/microchip/ksz_9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1,19 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
  * Microchip KSZ9477 register definitions
  *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
  */
 
 #ifndef __KSZ9477_REGS_H
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
new file mode 100644
index 0000000..d757ba1
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 series register access through SPI
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_priv.h"
+#include "ksz_spi.h"
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD			3
+#define KS_SPIOP_WR			2
+
+#define SPI_ADDR_SHIFT			24
+#define SPI_ADDR_MASK			(BIT(SPI_ADDR_SHIFT) - 1)
+#define SPI_TURNAROUND_SHIFT		5
+
+/* Enough to read all switch port registers. */
+#define SPI_TX_BUF_LEN			0x100
+
+static int ksz9477_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
+				unsigned int len)
+{
+	u32 txbuf;
+	int ret;
+
+	txbuf = reg & SPI_ADDR_MASK;
+	txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
+	txbuf <<= SPI_TURNAROUND_SHIFT;
+	txbuf = cpu_to_be32(txbuf);
+
+	ret = spi_write_then_read(spi, &txbuf, 4, val, len);
+	return ret;
+}
+
+static int ksz9477_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
+				 unsigned int len)
+{
+	u32 *txbuf = (u32 *)val;
+
+	*txbuf = reg & SPI_ADDR_MASK;
+	*txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
+	*txbuf <<= SPI_TURNAROUND_SHIFT;
+	*txbuf = cpu_to_be32(*txbuf);
+
+	return spi_write(spi, txbuf, 4 + len);
+}
+
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+			unsigned int len)
+{
+	struct spi_device *spi = dev->priv;
+
+	return ksz9477_spi_read_reg(spi, reg, data, len);
+}
+
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+			 unsigned int len)
+{
+	struct spi_device *spi = dev->priv;
+
+	if (len > SPI_TX_BUF_LEN)
+		len = SPI_TX_BUF_LEN;
+	memcpy(&dev->txbuf[4], data, len);
+	return ksz9477_spi_write_reg(spi, reg, dev->txbuf, len);
+}
+
+static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+	int ret;
+
+	*val = 0;
+	ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
+	if (!ret) {
+		*val = be32_to_cpu(*val);
+		/* convert to 24bit */
+		*val >>= 8;
+	}
+
+	return ret;
+}
+
+static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+	/* make it a big-endian 24-bit value from the MSB */
+	value <<= 8;
+	value = cpu_to_be32(value);
+	return ksz_spi_write(dev, reg, &value, 3);
+}
+
+static const struct ksz_io_ops ksz9477_spi_ops = {
+	.read8 = ksz_spi_read8,
+	.read16 = ksz_spi_read16,
+	.read24 = ksz_spi_read24,
+	.read32 = ksz_spi_read32,
+	.write8 = ksz_spi_write8,
+	.write16 = ksz_spi_write16,
+	.write24 = ksz_spi_write24,
+	.write32 = ksz_spi_write32,
+	.get = ksz_spi_get,
+	.set = ksz_spi_set,
+};
+
+static int ksz9477_spi_probe(struct spi_device *spi)
+{
+	struct ksz_device *dev;
+	int ret;
+
+	dev = ksz_switch_alloc(&spi->dev, &ksz9477_spi_ops, spi);
+	if (!dev)
+		return -ENOMEM;
+
+	if (spi->dev.platform_data)
+		dev->pdata = spi->dev.platform_data;
+
+	dev->txbuf = devm_kzalloc(dev->dev, 4 + SPI_TX_BUF_LEN, GFP_KERNEL);
+
+	ret = ksz9477_switch_register(dev);
+
+	/* Main DSA driver may not be started yet. */
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, dev);
+
+	return 0;
+}
+
+static int ksz9477_spi_remove(struct spi_device *spi)
+{
+	struct ksz_device *dev = spi_get_drvdata(spi);
+
+	if (dev)
+		ksz_switch_remove(dev);
+
+	return 0;
+}
+
+static void ksz9477_spi_shutdown(struct spi_device *spi)
+{
+	struct ksz_device *dev = spi_get_drvdata(spi);
+
+	if (dev && dev->dev_ops->shutdown)
+		dev->dev_ops->shutdown(dev);
+}
+
+static const struct of_device_id ksz9477_dt_ids[] = {
+	{ .compatible = "microchip,ksz9477" },
+	{ .compatible = "microchip,ksz9897" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+
+static struct spi_driver ksz9477_spi_driver = {
+	.driver = {
+		.name	= "ksz9477-switch",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(ksz9477_dt_ids),
+	},
+	.probe	= ksz9477_spi_probe,
+	.remove	= ksz9477_spi_remove,
+	.shutdown = ksz9477_spi_shutdown,
+};
+
+module_spi_driver(ksz9477_spi_driver);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 86b6464..9705808 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Microchip switch driver main logic
  *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
  */
 
 #include <linux/delay.h>
@@ -25,891 +14,206 @@
 #include <linux/phy.h>
 #include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/of_net.h>
 #include <net/dsa.h>
 #include <net/switchdev.h>
 
 #include "ksz_priv.h"
 
-static const struct {
-	int index;
-	char string[ETH_GSTRING_LEN];
-} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
-	{ 0x00, "rx_hi" },
-	{ 0x01, "rx_undersize" },
-	{ 0x02, "rx_fragments" },
-	{ 0x03, "rx_oversize" },
-	{ 0x04, "rx_jabbers" },
-	{ 0x05, "rx_symbol_err" },
-	{ 0x06, "rx_crc_err" },
-	{ 0x07, "rx_align_err" },
-	{ 0x08, "rx_mac_ctrl" },
-	{ 0x09, "rx_pause" },
-	{ 0x0A, "rx_bcast" },
-	{ 0x0B, "rx_mcast" },
-	{ 0x0C, "rx_ucast" },
-	{ 0x0D, "rx_64_or_less" },
-	{ 0x0E, "rx_65_127" },
-	{ 0x0F, "rx_128_255" },
-	{ 0x10, "rx_256_511" },
-	{ 0x11, "rx_512_1023" },
-	{ 0x12, "rx_1024_1522" },
-	{ 0x13, "rx_1523_2000" },
-	{ 0x14, "rx_2001" },
-	{ 0x15, "tx_hi" },
-	{ 0x16, "tx_late_col" },
-	{ 0x17, "tx_pause" },
-	{ 0x18, "tx_bcast" },
-	{ 0x19, "tx_mcast" },
-	{ 0x1A, "tx_ucast" },
-	{ 0x1B, "tx_deferred" },
-	{ 0x1C, "tx_total_col" },
-	{ 0x1D, "tx_exc_col" },
-	{ 0x1E, "tx_single_col" },
-	{ 0x1F, "tx_mult_col" },
-	{ 0x80, "rx_total" },
-	{ 0x81, "tx_total" },
-	{ 0x82, "rx_discards" },
-	{ 0x83, "tx_discards" },
-};
-
-static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+void ksz_update_port_member(struct ksz_device *dev, int port)
 {
-	u8 data;
-
-	ksz_read8(dev, addr, &data);
-	if (set)
-		data |= bits;
-	else
-		data &= ~bits;
-	ksz_write8(dev, addr, data);
-}
-
-static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
-{
-	u32 data;
-
-	ksz_read32(dev, addr, &data);
-	if (set)
-		data |= bits;
-	else
-		data &= ~bits;
-	ksz_write32(dev, addr, data);
-}
-
-static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
-			 bool set)
-{
-	u32 addr;
-	u8 data;
-
-	addr = PORT_CTRL_ADDR(port, offset);
-	ksz_read8(dev, addr, &data);
-
-	if (set)
-		data |= bits;
-	else
-		data &= ~bits;
-
-	ksz_write8(dev, addr, data);
-}
-
-static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset,
-			   u32 bits, bool set)
-{
-	u32 addr;
-	u32 data;
-
-	addr = PORT_CTRL_ADDR(port, offset);
-	ksz_read32(dev, addr, &data);
-
-	if (set)
-		data |= bits;
-	else
-		data &= ~bits;
-
-	ksz_write32(dev, addr, data);
-}
-
-static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
-	u8 data;
-
-	do {
-		ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
-		if (!(data & waiton))
-			break;
-		usleep_range(1, 10);
-	} while (timeout-- > 0);
-
-	if (timeout <= 0)
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
-	struct ksz_device *dev = ds->priv;
-	int ret;
-
-	mutex_lock(&dev->vlan_mutex);
-
-	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
-	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
-
-	/* wait to be cleared */
-	ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
-	if (ret < 0) {
-		dev_dbg(dev->dev, "Failed to read vlan table\n");
-		goto exit;
-	}
-
-	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
-	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
-	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
-
-	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
-exit:
-	mutex_unlock(&dev->vlan_mutex);
-
-	return ret;
-}
-
-static int set_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
-	struct ksz_device *dev = ds->priv;
-	int ret;
-
-	mutex_lock(&dev->vlan_mutex);
-
-	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
-	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
-	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
-
-	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
-	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
-
-	/* wait to be cleared */
-	ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
-	if (ret < 0) {
-		dev_dbg(dev->dev, "Failed to write vlan table\n");
-		goto exit;
-	}
-
-	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
-	/* update vlan cache table */
-	dev->vlan_cache[vid].table[0] = vlan_table[0];
-	dev->vlan_cache[vid].table[1] = vlan_table[1];
-	dev->vlan_cache[vid].table[2] = vlan_table[2];
-
-exit:
-	mutex_unlock(&dev->vlan_mutex);
-
-	return ret;
-}
-
-static void read_table(struct dsa_switch *ds, u32 *table)
-{
-	struct ksz_device *dev = ds->priv;
-
-	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
-	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
-	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
-	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
-}
-
-static void write_table(struct dsa_switch *ds, u32 *table)
-{
-	struct ksz_device *dev = ds->priv;
-
-	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
-	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
-	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
-	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
-}
-
-static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
-	u32 data;
-
-	do {
-		ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
-		if (!(data & waiton))
-			break;
-		usleep_range(1, 10);
-	} while (timeout-- > 0);
-
-	if (timeout <= 0)
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
-	u32 data;
-
-	do {
-		ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
-		if (!(data & waiton))
-			break;
-		usleep_range(1, 10);
-	} while (timeout-- > 0);
-
-	if (timeout <= 0)
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static int ksz_reset_switch(struct dsa_switch *ds)
-{
-	struct ksz_device *dev = ds->priv;
-	u8 data8;
-	u16 data16;
-	u32 data32;
-
-	/* reset switch */
-	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
-
-	/* turn off SPI DO Edge select */
-	ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
-	data8 &= ~SPI_AUTO_EDGE_DETECTION;
-	ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
-
-	/* default configuration */
-	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
-	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
-	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
-	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
-
-	/* disable interrupts */
-	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
-	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
-	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
-
-	/* set broadcast storm protection 10% rate */
-	ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
-	data16 &= ~BROADCAST_STORM_RATE;
-	data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
-	ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
-
-	return 0;
-}
-
-static void port_setup(struct ksz_device *dev, int port, bool cpu_port)
-{
-	u8 data8;
-	u16 data16;
-
-	/* enable tag tail for host port */
-	if (cpu_port)
-		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
-			     true);
-
-	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
-
-	/* set back pressure */
-	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
-
-	/* set flow control */
-	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
-		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true);
-
-	/* enable broadcast storm limit */
-	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
-
-	/* disable DiffServ priority */
-	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
-	/* replace priority */
-	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
-		     false);
-	ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
-		       MTI_PVID_REPLACE, false);
-
-	/* enable 802.1p priority */
-	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
-	/* configure MAC to 1G & RGMII mode */
-	ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
-	data8 |= PORT_RGMII_ID_EG_ENABLE;
-	data8 &= ~PORT_MII_NOT_1GBIT;
-	data8 &= ~PORT_MII_SEL_M;
-	data8 |= PORT_RGMII_SEL;
-	ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
-
-	/* clear pending interrupts */
-	ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
-}
-
-static void ksz_config_cpu_port(struct dsa_switch *ds)
-{
-	struct ksz_device *dev = ds->priv;
+	struct ksz_port *p;
 	int i;
 
-	ds->num_ports = dev->port_cnt;
+	for (i = 0; i < dev->port_cnt; i++) {
+		if (i == port || i == dev->cpu_port)
+			continue;
+		p = &dev->ports[i];
+		if (!(dev->member & (1 << i)))
+			continue;
 
-	for (i = 0; i < ds->num_ports; i++) {
-		if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
-			dev->cpu_port = i;
-
-			/* enable cpu port */
-			port_setup(dev, i, true);
-		}
+		/* Port is a member of the bridge and is forwarding. */
+		if (p->stp_state == BR_STATE_FORWARDING &&
+		    p->member != dev->member)
+			dev->dev_ops->cfg_port_member(dev, i, dev->member);
 	}
 }
+EXPORT_SYMBOL_GPL(ksz_update_port_member);
 
-static int ksz_setup(struct dsa_switch *ds)
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
 {
 	struct ksz_device *dev = ds->priv;
-	int ret = 0;
+	u16 val = 0xffff;
 
-	dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
-				       dev->num_vlans, GFP_KERNEL);
-	if (!dev->vlan_cache)
-		return -ENOMEM;
-
-	ret = ksz_reset_switch(ds);
-	if (ret) {
-		dev_err(ds->dev, "failed to reset switch\n");
-		return ret;
-	}
-
-	/* accept packet up to 2000bytes */
-	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
-
-	ksz_config_cpu_port(ds);
-
-	ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
-
-	/* queue based egress rate limit */
-	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
-
-	/* start switch */
-	ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
-	return 0;
-}
-
-static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
-						  int port)
-{
-	return DSA_TAG_PROTO_KSZ;
-}
-
-static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
-{
-	struct ksz_device *dev = ds->priv;
-	u16 val = 0;
-
-	ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+	dev->dev_ops->r_phy(dev, addr, reg, &val);
 
 	return val;
 }
+EXPORT_SYMBOL_GPL(ksz_phy_read16);
 
-static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
 	struct ksz_device *dev = ds->priv;
 
-	ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+	dev->dev_ops->w_phy(dev, addr, reg, val);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ksz_phy_write16);
 
-static int ksz_enable_port(struct dsa_switch *ds, int port,
-			   struct phy_device *phy)
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
 {
 	struct ksz_device *dev = ds->priv;
 
-	/* setup slave port */
-	port_setup(dev, port, false);
-
-	return 0;
-}
-
-static void ksz_disable_port(struct dsa_switch *ds, int port,
-			     struct phy_device *phy)
-{
-	struct ksz_device *dev = ds->priv;
-
-	/* there is no port disable */
-	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
-}
-
-static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
-{
 	if (sset != ETH_SS_STATS)
 		return 0;
 
-	return TOTAL_SWITCH_COUNTER_NUM;
+	return dev->mib_cnt;
 }
+EXPORT_SYMBOL_GPL(ksz_sset_count);
 
-static void ksz_get_strings(struct dsa_switch *ds, int port,
-			    u32 stringset, uint8_t *buf)
-{
-	int i;
-
-	if (stringset != ETH_SS_STATS)
-		return;
-
-	for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
-		memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
-		       ETH_GSTRING_LEN);
-	}
-}
-
-static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
-				  uint64_t *buf)
-{
-	struct ksz_device *dev = ds->priv;
-	int i;
-	u32 data;
-	int timeout;
-
-	mutex_lock(&dev->stats_mutex);
-
-	for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
-		data = MIB_COUNTER_READ;
-		data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S);
-		ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
-
-		timeout = 1000;
-		do {
-			ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
-				    &data);
-			usleep_range(1, 10);
-			if (!(data & MIB_COUNTER_READ))
-				break;
-		} while (timeout-- > 0);
-
-		/* failed to read MIB. get out of loop */
-		if (!timeout) {
-			dev_dbg(dev->dev, "Failed to get MIB\n");
-			break;
-		}
-
-		/* count resets upon read */
-		ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
-
-		dev->mib_value[i] += (uint64_t)data;
-		buf[i] = dev->mib_value[i];
-	}
-
-	mutex_unlock(&dev->stats_mutex);
-}
-
-static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
-	struct ksz_device *dev = ds->priv;
-	u8 data;
-
-	ksz_pread8(dev, port, P_STP_CTRL, &data);
-	data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
-	switch (state) {
-	case BR_STATE_DISABLED:
-		data |= PORT_LEARN_DISABLE;
-		break;
-	case BR_STATE_LISTENING:
-		data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-		break;
-	case BR_STATE_LEARNING:
-		data |= PORT_RX_ENABLE;
-		break;
-	case BR_STATE_FORWARDING:
-		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-		break;
-	case BR_STATE_BLOCKING:
-		data |= PORT_LEARN_DISABLE;
-		break;
-	default:
-		dev_err(ds->dev, "invalid STP state: %d\n", state);
-		return;
-	}
-
-	ksz_pwrite8(dev, port, P_STP_CTRL, data);
-}
-
-static void ksz_port_fast_age(struct dsa_switch *ds, int port)
-{
-	struct ksz_device *dev = ds->priv;
-	u8 data8;
-
-	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
-	data8 |= SW_FAST_AGING;
-	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
-
-	data8 &= ~SW_FAST_AGING;
-	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
-}
-
-static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+			 struct net_device *br)
 {
 	struct ksz_device *dev = ds->priv;
 
-	if (flag) {
-		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
-			     PORT_VLAN_LOOKUP_VID_0, true);
-		ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true);
-		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
-	} else {
-		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
-		ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false);
-		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
-			     PORT_VLAN_LOOKUP_VID_0, false);
-	}
+	dev->br_member |= (1 << port);
+
+	/* port_stp_state_set() will be called afterwards to put the port in
+	 * the appropriate state, so there is no need to do anything here.
+	 */
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
 
-static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_vlan *vlan)
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+			   struct net_device *br)
+{
+	struct ksz_device *dev = ds->priv;
+
+	dev->br_member &= ~(1 << port);
+	dev->member &= ~(1 << port);
+
+	/* port_stp_state_set() will be called afterwards to put the port in
+	 * the forwarding state, so there is no need to do anything here.
+	 */
+}
+EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
+
+void ksz_port_fast_age(struct dsa_switch *ds, int port)
+{
+	struct ksz_device *dev = ds->priv;
+
+	dev->dev_ops->flush_dyn_mac_table(dev, port);
+}
+EXPORT_SYMBOL_GPL(ksz_port_fast_age);
+
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+			  const struct switchdev_obj_port_vlan *vlan)
 {
 	/* nothing needed */
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
 
-static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
-			      const struct switchdev_obj_port_vlan *vlan)
-{
-	struct ksz_device *dev = ds->priv;
-	u32 vlan_table[3];
-	u16 vid;
-	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
-	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-		if (get_vlan_table(ds, vid, vlan_table)) {
-			dev_dbg(dev->dev, "Failed to get vlan table\n");
-			return;
-		}
-
-		vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
-		if (untagged)
-			vlan_table[1] |= BIT(port);
-		else
-			vlan_table[1] &= ~BIT(port);
-		vlan_table[1] &= ~(BIT(dev->cpu_port));
-
-		vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
-
-		if (set_vlan_table(ds, vid, vlan_table)) {
-			dev_dbg(dev->dev, "Failed to set vlan table\n");
-			return;
-		}
-
-		/* change PVID */
-		if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
-			ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
-	}
-}
-
-static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
-			     const struct switchdev_obj_port_vlan *vlan)
-{
-	struct ksz_device *dev = ds->priv;
-	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-	u32 vlan_table[3];
-	u16 vid;
-	u16 pvid;
-
-	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
-	pvid = pvid & 0xFFF;
-
-	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-		if (get_vlan_table(ds, vid, vlan_table)) {
-			dev_dbg(dev->dev, "Failed to get vlan table\n");
-			return -ETIMEDOUT;
-		}
-
-		vlan_table[2] &= ~BIT(port);
-
-		if (pvid == vid)
-			pvid = 1;
-
-		if (untagged)
-			vlan_table[1] &= ~BIT(port);
-
-		if (set_vlan_table(ds, vid, vlan_table)) {
-			dev_dbg(dev->dev, "Failed to set vlan table\n");
-			return -ETIMEDOUT;
-		}
-	}
-
-	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
-
-	return 0;
-}
-
-struct alu_struct {
-	/* entry 1 */
-	u8	is_static:1;
-	u8	is_src_filter:1;
-	u8	is_dst_filter:1;
-	u8	prio_age:3;
-	u32	_reserv_0_1:23;
-	u8	mstp:3;
-	/* entry 2 */
-	u8	is_override:1;
-	u8	is_use_fid:1;
-	u32	_reserv_1_1:23;
-	u8	port_forward:7;
-	/* entry 3 & 4*/
-	u32	_reserv_2_1:9;
-	u8	fid:7;
-	u8	mac[ETH_ALEN];
-};
-
-static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
-			    const unsigned char *addr, u16 vid)
-{
-	struct ksz_device *dev = ds->priv;
-	u32 alu_table[4];
-	u32 data;
-	int ret = 0;
-
-	mutex_lock(&dev->alu_mutex);
-
-	/* find any entry with mac & vid */
-	data = vid << ALU_FID_INDEX_S;
-	data |= ((addr[0] << 8) | addr[1]);
-	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
-	data = ((addr[2] << 24) | (addr[3] << 16));
-	data |= ((addr[4] << 8) | addr[5]);
-	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
-	/* start read operation */
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
-	/* wait to be finished */
-	ret = wait_alu_ready(dev, ALU_START, 1000);
-	if (ret < 0) {
-		dev_dbg(dev->dev, "Failed to read ALU\n");
-		goto exit;
-	}
-
-	/* read ALU entry */
-	read_table(ds, alu_table);
-
-	/* update ALU entry */
-	alu_table[0] = ALU_V_STATIC_VALID;
-	alu_table[1] |= BIT(port);
-	if (vid)
-		alu_table[1] |= ALU_V_USE_FID;
-	alu_table[2] = (vid << ALU_V_FID_S);
-	alu_table[2] |= ((addr[0] << 8) | addr[1]);
-	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
-	alu_table[3] |= ((addr[4] << 8) | addr[5]);
-
-	write_table(ds, alu_table);
-
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
-	/* wait to be finished */
-	ret = wait_alu_ready(dev, ALU_START, 1000);
-	if (ret < 0)
-		dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
-	mutex_unlock(&dev->alu_mutex);
-
-	return ret;
-}
-
-static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
-			    const unsigned char *addr, u16 vid)
-{
-	struct ksz_device *dev = ds->priv;
-	u32 alu_table[4];
-	u32 data;
-	int ret = 0;
-
-	mutex_lock(&dev->alu_mutex);
-
-	/* read any entry with mac & vid */
-	data = vid << ALU_FID_INDEX_S;
-	data |= ((addr[0] << 8) | addr[1]);
-	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
-	data = ((addr[2] << 24) | (addr[3] << 16));
-	data |= ((addr[4] << 8) | addr[5]);
-	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
-	/* start read operation */
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
-	/* wait to be finished */
-	ret = wait_alu_ready(dev, ALU_START, 1000);
-	if (ret < 0) {
-		dev_dbg(dev->dev, "Failed to read ALU\n");
-		goto exit;
-	}
-
-	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
-	if (alu_table[0] & ALU_V_STATIC_VALID) {
-		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
-		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
-		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
-
-		/* clear forwarding port */
-		alu_table[2] &= ~BIT(port);
-
-		/* if there is no port to forward, clear table */
-		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
-			alu_table[0] = 0;
-			alu_table[1] = 0;
-			alu_table[2] = 0;
-			alu_table[3] = 0;
-		}
-	} else {
-		alu_table[0] = 0;
-		alu_table[1] = 0;
-		alu_table[2] = 0;
-		alu_table[3] = 0;
-	}
-
-	write_table(ds, alu_table);
-
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
-	/* wait to be finished */
-	ret = wait_alu_ready(dev, ALU_START, 1000);
-	if (ret < 0)
-		dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
-	mutex_unlock(&dev->alu_mutex);
-
-	return ret;
-}
-
-static void convert_alu(struct alu_struct *alu, u32 *alu_table)
-{
-	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
-	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
-	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
-	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
-			ALU_V_PRIO_AGE_CNT_M;
-	alu->mstp = alu_table[0] & ALU_V_MSTP_M;
-
-	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
-	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
-	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
-
-	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
-
-	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
-	alu->mac[1] = alu_table[2] & 0xFF;
-	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
-	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
-	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
-	alu->mac[5] = alu_table[3] & 0xFF;
-}
-
-static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
-			     dsa_fdb_dump_cb_t *cb, void *data)
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+		      void *data)
 {
 	struct ksz_device *dev = ds->priv;
 	int ret = 0;
-	u32 ksz_data;
-	u32 alu_table[4];
+	u16 i = 0;
+	u16 entries = 0;
+	u8 timestamp = 0;
+	u8 fid;
+	u8 member;
 	struct alu_struct alu;
-	int timeout;
-
-	mutex_lock(&dev->alu_mutex);
-
-	/* start ALU search */
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
 
 	do {
-		timeout = 1000;
-		do {
-			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
-			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
-				break;
-			usleep_range(1, 10);
-		} while (timeout-- > 0);
-
-		if (!timeout) {
-			dev_dbg(dev->dev, "Failed to search ALU\n");
-			ret = -ETIMEDOUT;
-			goto exit;
-		}
-
-		/* read ALU table */
-		read_table(ds, alu_table);
-
-		convert_alu(&alu, alu_table);
-
-		if (alu.port_forward & BIT(port)) {
+		alu.is_static = false;
+		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
+						    &member, &timestamp,
+						    &entries);
+		if (!ret && (member & BIT(port))) {
 			ret = cb(alu.mac, alu.fid, alu.is_static, data);
 			if (ret)
-				goto exit;
+				break;
 		}
-	} while (ksz_data & ALU_START);
-
-exit:
-
-	/* stop ALU search */
-	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
-
-	mutex_unlock(&dev->alu_mutex);
+		i++;
+	} while (i < entries);
+	if (i >= entries)
+		ret = 0;
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
 
-static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+			 const struct switchdev_obj_port_mdb *mdb)
 {
 	/* nothing to do */
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
 
-static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
-			     const struct switchdev_obj_port_mdb *mdb)
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+		      const struct switchdev_obj_port_mdb *mdb)
 {
 	struct ksz_device *dev = ds->priv;
-	u32 static_table[4];
-	u32 data;
+	struct alu_struct alu;
 	int index;
-	u32 mac_hi, mac_lo;
+	int empty = 0;
 
-	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
-	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
-	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+	alu.port_forward = 0;
+	for (index = 0; index < dev->num_statics; index++) {
+		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+			/* Found one already in static MAC table. */
+			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+			    alu.fid == mdb->vid)
+				break;
+		/* Remember the first empty entry. */
+		} else if (!empty) {
+			empty = index + 1;
+		}
+	}
 
-	mutex_lock(&dev->alu_mutex);
+	/* no available entry */
+	if (index == dev->num_statics && !empty)
+		return;
+
+	/* add entry */
+	if (index == dev->num_statics) {
+		index = empty - 1;
+		memset(&alu, 0, sizeof(alu));
+		memcpy(alu.mac, mdb->addr, ETH_ALEN);
+		alu.is_static = true;
+	}
+	alu.port_forward |= BIT(port);
+	if (mdb->vid) {
+		alu.is_use_fid = true;
+
+		/* Need a way to map VID to FID. */
+		alu.fid = mdb->vid;
+	}
+	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
+
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_mdb *mdb)
+{
+	struct ksz_device *dev = ds->priv;
+	struct alu_struct alu;
+	int index;
+	int ret = 0;
 
 	for (index = 0; index < dev->num_statics; index++) {
-		/* find empty slot first */
-		data = (index << ALU_STAT_INDEX_S) |
-			ALU_STAT_READ | ALU_STAT_START;
-		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
-		/* wait to be finished */
-		if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
-			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
-			goto exit;
-		}
-
-		/* read ALU static table */
-		read_table(ds, static_table);
-
-		if (static_table[0] & ALU_V_STATIC_VALID) {
-			/* check this has same vid & mac address */
-			if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
-			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
-			    (static_table[3] == mac_lo)) {
-				/* found matching one */
+		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+			/* Found one already in static MAC table. */
+			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+			    alu.fid == mdb->vid)
 				break;
-			}
-		} else {
-			/* found empty one */
-			break;
 		}
 	}
 
@@ -917,229 +221,44 @@ static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
 	if (index == dev->num_statics)
 		goto exit;
 
-	/* add entry */
-	static_table[0] = ALU_V_STATIC_VALID;
-	static_table[1] |= BIT(port);
-	if (mdb->vid)
-		static_table[1] |= ALU_V_USE_FID;
-	static_table[2] = (mdb->vid << ALU_V_FID_S);
-	static_table[2] |= mac_hi;
-	static_table[3] = mac_lo;
-
-	write_table(ds, static_table);
-
-	data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
-	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
-	/* wait to be finished */
-	if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
-		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
-
-exit:
-	mutex_unlock(&dev->alu_mutex);
-}
-
-static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_mdb *mdb)
-{
-	struct ksz_device *dev = ds->priv;
-	u32 static_table[4];
-	u32 data;
-	int index;
-	int ret = 0;
-	u32 mac_hi, mac_lo;
-
-	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
-	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
-	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
-
-	mutex_lock(&dev->alu_mutex);
-
-	for (index = 0; index < dev->num_statics; index++) {
-		/* find empty slot first */
-		data = (index << ALU_STAT_INDEX_S) |
-			ALU_STAT_READ | ALU_STAT_START;
-		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
-		/* wait to be finished */
-		ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
-		if (ret < 0) {
-			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
-			goto exit;
-		}
-
-		/* read ALU static table */
-		read_table(ds, static_table);
-
-		if (static_table[0] & ALU_V_STATIC_VALID) {
-			/* check this has same vid & mac address */
-
-			if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
-			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
-			    (static_table[3] == mac_lo)) {
-				/* found matching one */
-				break;
-			}
-		}
-	}
-
-	/* no available entry */
-	if (index == dev->num_statics) {
-		ret = -EINVAL;
-		goto exit;
-	}
-
 	/* clear port */
-	static_table[1] &= ~BIT(port);
-
-	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
-		/* delete entry */
-		static_table[0] = 0;
-		static_table[1] = 0;
-		static_table[2] = 0;
-		static_table[3] = 0;
-	}
-
-	write_table(ds, static_table);
-
-	data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
-	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
-	/* wait to be finished */
-	ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
-	if (ret < 0)
-		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+	alu.port_forward &= ~BIT(port);
+	if (!alu.port_forward)
+		alu.is_static = false;
+	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
 
 exit:
-	mutex_unlock(&dev->alu_mutex);
-
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
 
-static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
-			       struct dsa_mall_mirror_tc_entry *mirror,
-			       bool ingress)
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
 {
 	struct ksz_device *dev = ds->priv;
 
-	if (ingress)
-		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
-	else
-		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+	/* setup slave port */
+	dev->dev_ops->port_setup(dev, port, false);
 
-	ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
-	/* configure mirror port */
-	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
-		     PORT_MIRROR_SNIFFER, true);
-
-	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+	/* port_stp_state_set() will be called afterwards to enable the port,
+	 * so there is no need to do anything here.
+	 */
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ksz_enable_port);
 
-static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
-				struct dsa_mall_mirror_tc_entry *mirror)
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
 {
 	struct ksz_device *dev = ds->priv;
-	u8 data;
 
-	if (mirror->ingress)
-		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
-	else
-		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+	dev->on_ports &= ~(1 << port);
+	dev->live_ports &= ~(1 << port);
 
-	ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
-
-	if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
-		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
-			     PORT_MIRROR_SNIFFER, false);
+	/* port_stp_state_set() will be called afterwards to disable the port,
+	 * so there is no need to do anything here.
+	 */
 }
-
-static const struct dsa_switch_ops ksz_switch_ops = {
-	.get_tag_protocol	= ksz_get_tag_protocol,
-	.setup			= ksz_setup,
-	.phy_read		= ksz_phy_read16,
-	.phy_write		= ksz_phy_write16,
-	.port_enable		= ksz_enable_port,
-	.port_disable		= ksz_disable_port,
-	.get_strings		= ksz_get_strings,
-	.get_ethtool_stats	= ksz_get_ethtool_stats,
-	.get_sset_count		= ksz_sset_count,
-	.port_stp_state_set	= ksz_port_stp_state_set,
-	.port_fast_age		= ksz_port_fast_age,
-	.port_vlan_filtering	= ksz_port_vlan_filtering,
-	.port_vlan_prepare	= ksz_port_vlan_prepare,
-	.port_vlan_add		= ksz_port_vlan_add,
-	.port_vlan_del		= ksz_port_vlan_del,
-	.port_fdb_dump		= ksz_port_fdb_dump,
-	.port_fdb_add		= ksz_port_fdb_add,
-	.port_fdb_del		= ksz_port_fdb_del,
-	.port_mdb_prepare       = ksz_port_mdb_prepare,
-	.port_mdb_add           = ksz_port_mdb_add,
-	.port_mdb_del           = ksz_port_mdb_del,
-	.port_mirror_add	= ksz_port_mirror_add,
-	.port_mirror_del	= ksz_port_mirror_del,
-};
-
-struct ksz_chip_data {
-	u32 chip_id;
-	const char *dev_name;
-	int num_vlans;
-	int num_alus;
-	int num_statics;
-	int cpu_ports;
-	int port_cnt;
-};
-
-static const struct ksz_chip_data ksz_switch_chips[] = {
-	{
-		.chip_id = 0x00947700,
-		.dev_name = "KSZ9477",
-		.num_vlans = 4096,
-		.num_alus = 4096,
-		.num_statics = 16,
-		.cpu_ports = 0x7F,	/* can be configured as cpu port */
-		.port_cnt = 7,		/* total physical port count */
-	},
-	{
-		.chip_id = 0x00989700,
-		.dev_name = "KSZ9897",
-		.num_vlans = 4096,
-		.num_alus = 4096,
-		.num_statics = 16,
-		.cpu_ports = 0x7F,	/* can be configured as cpu port */
-		.port_cnt = 7,		/* total physical port count */
-	},
-};
-
-static int ksz_switch_init(struct ksz_device *dev)
-{
-	int i;
-
-	dev->ds->ops = &ksz_switch_ops;
-
-	for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
-		const struct ksz_chip_data *chip = &ksz_switch_chips[i];
-
-		if (dev->chip_id == chip->chip_id) {
-			dev->name = chip->dev_name;
-			dev->num_vlans = chip->num_vlans;
-			dev->num_alus = chip->num_alus;
-			dev->num_statics = chip->num_statics;
-			dev->port_cnt = chip->port_cnt;
-			dev->cpu_ports = chip->cpu_ports;
-
-			break;
-		}
-	}
-
-	/* no switch found */
-	if (!dev->port_cnt)
-		return -ENODEV;
-
-	return 0;
-}
+EXPORT_SYMBOL_GPL(ksz_disable_port);
 
 struct ksz_device *ksz_switch_alloc(struct device *base,
 				    const struct ksz_io_ops *ops,
@@ -1167,34 +286,8 @@ struct ksz_device *ksz_switch_alloc(struct device *base,
 }
 EXPORT_SYMBOL(ksz_switch_alloc);
 
-int ksz_switch_detect(struct ksz_device *dev)
-{
-	u8 data8;
-	u32 id32;
-	int ret;
-
-	/* turn off SPI DO Edge select */
-	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
-	if (ret)
-		return ret;
-
-	data8 &= ~SPI_AUTO_EDGE_DETECTION;
-	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
-	if (ret)
-		return ret;
-
-	/* read chip id */
-	ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
-	if (ret)
-		return ret;
-
-	dev->chip_id = id32;
-
-	return 0;
-}
-EXPORT_SYMBOL(ksz_switch_detect);
-
-int ksz_switch_register(struct ksz_device *dev)
+int ksz_switch_register(struct ksz_device *dev,
+			const struct ksz_dev_ops *ops)
 {
 	int ret;
 
@@ -1206,19 +299,35 @@ int ksz_switch_register(struct ksz_device *dev)
 	mutex_init(&dev->alu_mutex);
 	mutex_init(&dev->vlan_mutex);
 
-	if (ksz_switch_detect(dev))
+	dev->dev_ops = ops;
+
+	if (dev->dev_ops->detect(dev))
 		return -EINVAL;
 
-	ret = ksz_switch_init(dev);
+	ret = dev->dev_ops->init(dev);
 	if (ret)
 		return ret;
 
-	return dsa_register_switch(dev->ds);
+	dev->interface = PHY_INTERFACE_MODE_MII;
+	if (dev->dev->of_node) {
+		ret = of_get_phy_mode(dev->dev->of_node);
+		if (ret >= 0)
+			dev->interface = ret;
+	}
+
+	ret = dsa_register_switch(dev->ds);
+	if (ret) {
+		dev->dev_ops->exit(dev);
+		return ret;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(ksz_switch_register);
 
 void ksz_switch_remove(struct ksz_device *dev)
 {
+	dev->dev_ops->exit(dev);
 	dsa_unregister_switch(dev->ds);
 }
 EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
new file mode 100644
index 0000000..2dd832d
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip switch driver common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_COMMON_H
+#define __KSZ_COMMON_H
+
+void ksz_update_port_member(struct ksz_device *dev, int port);
+
+/* Common DSA access functions */
+
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+			 struct net_device *br);
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+			   struct net_device *br);
+void ksz_port_fast_age(struct dsa_switch *ds, int port);
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+			  const struct switchdev_obj_port_vlan *vlan);
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+		      void *data);
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+			 const struct switchdev_obj_port_mdb *mdb);
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+		      const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_mdb *mdb);
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+
+/* Common register access functions */
+
+static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->read8(dev, reg, val);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->read16(dev, reg, val);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->read24(dev, reg, val);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->read32(dev, reg, val);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->write8(dev, reg, value);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->write16(dev, reg, value);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->write24(dev, reg, value);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->write32(dev, reg, value);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_get(struct ksz_device *dev, u32 reg, void *data,
+			  size_t len)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->get(dev, reg, data, len);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline int ksz_set(struct ksz_device *dev, u32 reg, void *data,
+			  size_t len)
+{
+	int ret;
+
+	mutex_lock(&dev->reg_mutex);
+	ret = dev->ops->set(dev, reg, data, len);
+	mutex_unlock(&dev->reg_mutex);
+
+	return ret;
+}
+
+static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
+			      u8 *data)
+{
+	ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
+			       u16 *data)
+{
+	ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
+			       u32 *data)
+{
+	ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+			       u8 data)
+{
+	ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+				u16 data)
+{
+	ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+				u32 data)
+{
+	ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+	u8 data;
+
+	ksz_read8(dev, addr, &data);
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+	ksz_write8(dev, addr, data);
+}
+
+static inline void ksz_port_cfg(struct ksz_device *dev, int port, int offset,
+				u8 bits, bool set)
+{
+	u32 addr;
+	u8 data;
+
+	addr = dev->dev_ops->get_port_addr(port, offset);
+	ksz_read8(dev, addr, &data);
+
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+
+	ksz_write8(dev, addr, data);
+}
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
index 2a98dbd..a38ff08 100644
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -1,19 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
  * Microchip KSZ series switch common definitions
  *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
  */
 
 #ifndef __KSZ_PRIV_H
@@ -25,7 +14,7 @@
 #include <linux/etherdevice.h>
 #include <net/dsa.h>
 
-#include "ksz_9477_reg.h"
+#include "ksz9477_reg.h"
 
 struct ksz_io_ops;
 
@@ -33,6 +22,27 @@ struct vlan_table {
 	u32 table[3];
 };
 
+struct ksz_port_mib {
+	u8 cnt_ptr;
+	u64 *counters;
+};
+
+struct ksz_port {
+	u16 member;
+	u16 vid_member;
+	int stp_state;
+	struct phy_device phydev;
+
+	u32 on:1;			/* port is not disabled by hardware */
+	u32 phy:1;			/* port has a PHY */
+	u32 fiber:1;			/* port is fiber */
+	u32 sgmii:1;			/* port is SGMII */
+	u32 force:1;
+	u32 link_just_down:1;		/* link just went down */
+
+	struct ksz_port_mib mib;
+};
+
 struct ksz_device {
 	struct dsa_switch *ds;
 	struct ksz_platform_data *pdata;
@@ -43,6 +53,7 @@ struct ksz_device {
 	struct mutex alu_mutex;		/* ALU access */
 	struct mutex vlan_mutex;	/* vlan access */
 	const struct ksz_io_ops *ops;
+	const struct ksz_dev_ops *dev_ops;
 
 	struct device *dev;
 
@@ -55,11 +66,37 @@ struct ksz_device {
 	int num_statics;
 	int cpu_port;			/* port connected to CPU */
 	int cpu_ports;			/* port bitmap can be cpu port */
+	int phy_port_cnt;
 	int port_cnt;
+	int reg_mib_cnt;
+	int mib_cnt;
+	int mib_port_cnt;
+	int last_port;			/* ports after this one are not used */
+	phy_interface_t interface;
+	u32 regs_size;
 
 	struct vlan_table *vlan_cache;
 
 	u64 mib_value[TOTAL_SWITCH_COUNTER_NUM];
+
+	u8 *txbuf;
+
+	struct ksz_port *ports;
+	struct timer_list mib_read_timer;
+	struct work_struct mib_read;
+	unsigned long mib_read_interval;
+	u16 br_member;
+	u16 member;
+	u16 live_ports;
+	u16 on_ports;			/* ports enabled by DSA */
+	u16 rx_ports;
+	u16 tx_ports;
+	u16 mirror_rx;
+	u16 mirror_tx;
+	u32 features;			/* chip specific features */
+	u32 overrides;			/* chip functions set by user */
+	u16 host_mask;
+	u16 port_mask;
 };
 
 struct ksz_io_ops {
@@ -71,140 +108,60 @@ struct ksz_io_ops {
 	int (*write16)(struct ksz_device *dev, u32 reg, u16 value);
 	int (*write24)(struct ksz_device *dev, u32 reg, u32 value);
 	int (*write32)(struct ksz_device *dev, u32 reg, u32 value);
-	int (*phy_read16)(struct ksz_device *dev, int addr, int reg,
-			  u16 *value);
-	int (*phy_write16)(struct ksz_device *dev, int addr, int reg,
-			   u16 value);
+	int (*get)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+	int (*set)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+};
+
+struct alu_struct {
+	/* entry 1 */
+	u8	is_static:1;
+	u8	is_src_filter:1;
+	u8	is_dst_filter:1;
+	u8	prio_age:3;
+	u32	_reserv_0_1:23;
+	u8	mstp:3;
+	/* entry 2 */
+	u8	is_override:1;
+	u8	is_use_fid:1;
+	u32	_reserv_1_1:23;
+	u8	port_forward:7;
+	/* entry 3 & 4 */
+	u32	_reserv_2_1:9;
+	u8	fid:7;
+	u8	mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+	u32 (*get_port_addr)(int port, int offset);
+	void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+	void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+	void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+	void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+	void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+	int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+			       u8 *fid, u8 *src_port, u8 *timestamp,
+			       u16 *entries);
+	int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+			       struct alu_struct *alu);
+	void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+				struct alu_struct *alu);
+	void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+			  u64 *cnt);
+	void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+			  u64 *dropped, u64 *cnt);
+	void (*port_init_cnt)(struct ksz_device *dev, int port);
+	int (*shutdown)(struct ksz_device *dev);
+	int (*detect)(struct ksz_device *dev);
+	int (*init)(struct ksz_device *dev);
+	void (*exit)(struct ksz_device *dev);
 };
 
 struct ksz_device *ksz_switch_alloc(struct device *base,
 				    const struct ksz_io_ops *ops, void *priv);
-int ksz_switch_detect(struct ksz_device *dev);
-int ksz_switch_register(struct ksz_device *dev);
+int ksz_switch_register(struct ksz_device *dev,
+			const struct ksz_dev_ops *ops);
 void ksz_switch_remove(struct ksz_device *dev);
 
-static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->read8(dev, reg, val);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->read16(dev, reg, val);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->read24(dev, reg, val);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->read32(dev, reg, val);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->write8(dev, reg, value);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->write16(dev, reg, value);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->write24(dev, reg, value);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
-	int ret;
-
-	mutex_lock(&dev->reg_mutex);
-	ret = dev->ops->write32(dev, reg, value);
-	mutex_unlock(&dev->reg_mutex);
-
-	return ret;
-}
-
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
-			      u8 *data)
-{
-	ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
-			       u16 *data)
-{
-	ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
-			       u32 *data)
-{
-	ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
-			       u8 data)
-{
-	ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
-				u16 data)
-{
-	ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
-				u32 data)
-{
-	ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
+int ksz9477_switch_register(struct ksz_device *dev);
 
 #endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
deleted file mode 100644
index 8c1778b..0000000
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Microchip KSZ series register access through SPI
- *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_priv.h"
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD			3
-#define KS_SPIOP_WR			2
-
-#define SPI_ADDR_SHIFT			24
-#define SPI_ADDR_MASK			(BIT(SPI_ADDR_SHIFT) - 1)
-#define SPI_TURNAROUND_SHIFT		5
-
-static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
-			    unsigned int len)
-{
-	u32 txbuf;
-	int ret;
-
-	txbuf = reg & SPI_ADDR_MASK;
-	txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
-	txbuf <<= SPI_TURNAROUND_SHIFT;
-	txbuf = cpu_to_be32(txbuf);
-
-	ret = spi_write_then_read(spi, &txbuf, 4, val, len);
-	return ret;
-}
-
-static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
-			unsigned int len)
-{
-	struct spi_device *spi = dev->priv;
-
-	return ksz_spi_read_reg(spi, reg, data, len);
-}
-
-static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
-	return ksz_spi_read(dev, reg, val, 1);
-}
-
-static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
-	int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
-
-	if (!ret)
-		*val = be16_to_cpu(*val);
-
-	return ret;
-}
-
-static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
-	int ret;
-
-	*val = 0;
-	ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
-	if (!ret) {
-		*val = be32_to_cpu(*val);
-		/* convert to 24bit */
-		*val >>= 8;
-	}
-
-	return ret;
-}
-
-static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
-	int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
-
-	if (!ret)
-		*val = be32_to_cpu(*val);
-
-	return ret;
-}
-
-static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
-			     unsigned int len)
-{
-	u32 txbuf;
-	u8 data[12];
-	int i;
-
-	txbuf = reg & SPI_ADDR_MASK;
-	txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
-	txbuf <<= SPI_TURNAROUND_SHIFT;
-	txbuf = cpu_to_be32(txbuf);
-
-	data[0] = txbuf & 0xFF;
-	data[1] = (txbuf & 0xFF00) >> 8;
-	data[2] = (txbuf & 0xFF0000) >> 16;
-	data[3] = (txbuf & 0xFF000000) >> 24;
-	for (i = 0; i < len; i++)
-		data[i + 4] = val[i];
-
-	return spi_write(spi, &data, 4 + len);
-}
-
-static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
-	struct spi_device *spi = dev->priv;
-
-	return ksz_spi_write_reg(spi, reg, &value, 1);
-}
-
-static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
-	struct spi_device *spi = dev->priv;
-
-	value = cpu_to_be16(value);
-	return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2);
-}
-
-static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
-	struct spi_device *spi = dev->priv;
-
-	/* make it to big endian 24bit from MSB */
-	value <<= 8;
-	value = cpu_to_be32(value);
-	return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3);
-}
-
-static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
-	struct spi_device *spi = dev->priv;
-
-	value = cpu_to_be32(value);
-	return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4);
-}
-
-static const struct ksz_io_ops ksz_spi_ops = {
-	.read8 = ksz_spi_read8,
-	.read16 = ksz_spi_read16,
-	.read24 = ksz_spi_read24,
-	.read32 = ksz_spi_read32,
-	.write8 = ksz_spi_write8,
-	.write16 = ksz_spi_write16,
-	.write24 = ksz_spi_write24,
-	.write32 = ksz_spi_write32,
-};
-
-static int ksz_spi_probe(struct spi_device *spi)
-{
-	struct ksz_device *dev;
-	int ret;
-
-	dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi);
-	if (!dev)
-		return -ENOMEM;
-
-	if (spi->dev.platform_data)
-		dev->pdata = spi->dev.platform_data;
-
-	ret = ksz_switch_register(dev);
-	if (ret)
-		return ret;
-
-	spi_set_drvdata(spi, dev);
-
-	return 0;
-}
-
-static int ksz_spi_remove(struct spi_device *spi)
-{
-	struct ksz_device *dev = spi_get_drvdata(spi);
-
-	if (dev)
-		ksz_switch_remove(dev);
-
-	return 0;
-}
-
-static const struct of_device_id ksz_dt_ids[] = {
-	{ .compatible = "microchip,ksz9477" },
-	{ .compatible = "microchip,ksz9897" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, ksz_dt_ids);
-
-static struct spi_driver ksz_spi_driver = {
-	.driver = {
-		.name	= "ksz9477-switch",
-		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(ksz_dt_ids),
-	},
-	.probe	= ksz_spi_probe,
-	.remove	= ksz_spi_remove,
-};
-
-module_spi_driver(ksz_spi_driver);
-
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_spi.h b/drivers/net/dsa/microchip/ksz_spi.h
new file mode 100644
index 0000000..427811b
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip KSZ series SPI access common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ *	Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ_SPI_H
+#define __KSZ_SPI_H
+
+/* Chip dependent SPI access */
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+			unsigned int len);
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+			 unsigned int len);
+
+static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+	return ksz_spi_read(dev, reg, val, 1);
+}
+
+static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+	int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
+
+	if (!ret)
+		*val = be16_to_cpu(*val);
+
+	return ret;
+}
+
+static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+	int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
+
+	if (!ret)
+		*val = be32_to_cpu(*val);
+
+	return ret;
+}
+
+static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+	return ksz_spi_write(dev, reg, &value, 1);
+}
+
+static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+	value = cpu_to_be16(value);
+	return ksz_spi_write(dev, reg, &value, 2);
+}
+
+static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+	value = cpu_to_be32(value);
+	return ksz_spi_write(dev, reg, &value, 4);
+}
+
+static int ksz_spi_get(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+	return ksz_spi_read(dev, reg, data, len);
+}
+
+static int ksz_spi_set(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+	return ksz_spi_write(dev, reg, data, len);
+}
+
+#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a5de9bf..74547f4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
 			if (phydev->asym_pause)
 				rmt_adv |= LPA_PAUSE_ASYM;
 
-			lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+			lcl_adv = linkmode_adv_to_lcl_adv_t(
+				phydev->advertising);
 			flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 
 			if (flowctrl & FLOW_CTRL_TX)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e05d4ed..b603f8d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2524,11 +2524,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 	mutex_unlock(&chip->reg_lock);
 
 	if (reg == MII_PHYSID2) {
-		/* Some internal PHYS don't have a model number.  Use
-		 * the mv88e6390 family model number instead.
-		 */
-		if (!(val & 0x3f0))
-			val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+		/* Some internal PHYs don't have a model number. */
+		if (chip->info->family != MV88E6XXX_FAMILY_6165)
+			/* Then there is the 6165 family. It gets its
+			 * PHYs correct. But it can also have two
+			 * SERDES interfaces in the PHY address
+			 * space. And these don't have a model
+			 * number. But they are not PHYs, so we don't
+			 * want to give them something a PHY driver
+			 * will recognise.
+			 *
+			 * Use the mv88e6390 family model number
+			 * instead, for anything which really could be
+			 * a PHY.
+			 */
+			if (!(val & 0x3f0))
+				val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
 	}
 
 	return err ? err : val;
@@ -3234,6 +3245,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3276,6 +3288,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3291,8 +3304,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
 	.vtu_getnext = mv88e6390_g1_vtu_getnext,
 	.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
 	.serdes_power = mv88e6390x_serdes_power,
-	.serdes_irq_setup = mv88e6390_serdes_irq_setup,
-	.serdes_irq_free = mv88e6390_serdes_irq_free,
+	.serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+	.serdes_irq_free = mv88e6390x_serdes_irq_free,
 	.gpio_ops = &mv88e6352_gpio_ops,
 	.phylink_validate = mv88e6390x_phylink_validate,
 };
@@ -3318,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3405,11 +3419,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
 	.port_set_egress_floods = mv88e6352_port_set_egress_floods,
 	.port_set_ether_type = mv88e6351_port_set_ether_type,
 	.port_pause_limit = mv88e6390_port_pause_limit,
-	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3710,11 +3724,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
 	.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_limit = mv88e6390_port_pause_limit,
-	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3757,11 +3771,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_limit = mv88e6390_port_pause_limit,
-	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6352_port_get_cmode,
+	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,8 +3791,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	.vtu_getnext = mv88e6390_g1_vtu_getnext,
 	.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
 	.serdes_power = mv88e6390x_serdes_power,
-	.serdes_irq_setup = mv88e6390_serdes_irq_setup,
-	.serdes_irq_free = mv88e6390_serdes_irq_free,
+	.serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+	.serdes_irq_free = mv88e6390x_serdes_irq_free,
 	.gpio_ops = &mv88e6352_gpio_ops,
 	.avb_ops = &mv88e6390_avb_ops,
 	.ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index cd7db60..ebd26b6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 	u16 reg;
 	int err;
 
-	if (mode == PHY_INTERFACE_MODE_NA)
-		return 0;
-
 	if (port != 9 && port != 10)
 		return -EOPNOTSUPP;
 
+	/* Default to a slow mode to free up SERDES interfaces for
+	 * other ports which might use them for SFPs.
+	 */
+	if (mode == PHY_INTERFACE_MODE_NA)
+		mode = PHY_INTERFACE_MODE_1000BASEX;
+
 	switch (mode) {
 	case PHY_INTERFACE_MODE_1000BASEX:
 		cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
@@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 	return 0;
 }
 
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+			     phy_interface_t mode)
+{
+	switch (mode) {
+	case PHY_INTERFACE_MODE_XGMII:
+	case PHY_INTERFACE_MODE_XAUI:
+	case PHY_INTERFACE_MODE_RXAUI:
+		return -EINVAL;
+	default:
+		break;
+	}
+
+	return mv88e6390x_port_set_cmode(chip, port, mode);
+}
+
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
 {
 	int err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 36904c9..0d81866 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
 			       u8 out);
 int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
 			       u8 out);
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+			     phy_interface_t mode);
 int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 			      phy_interface_t mode);
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index bb69650..2caa8c8 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -619,15 +619,11 @@ static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
 	return ret;
 }
 
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
 {
 	int lane;
 	int err;
 
-	/* Only support ports 9 and 10 at the moment */
-	if (port < 9)
-		return 0;
-
 	lane = mv88e6390x_serdes_get_lane(chip, port);
 
 	if (lane == -ENODEV)
@@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
 	return mv88e6390_serdes_irq_enable(chip, port, lane);
 }
 
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+{
+	if (port < 9)
+		return 0;
+
+	return mv88e6390x_serdes_irq_setup(chip, port);
+}
+
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
 {
 	int lane = mv88e6390x_serdes_get_lane(chip, port);
 
-	if (port < 9)
+	if (lane == -ENODEV)
 		return;
 
 	if (lane < 0)
@@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
 	chip->ports[port].serdes_irq = 0;
 }
 
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+	if (port < 9)
+		return;
+
+	mv88e6390x_serdes_irq_free(chip, port);
+}
+
 int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
 {
 	u8 cmode = chip->ports[port].cmode;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7870c5a..573dce8 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
 int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
 int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
 void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
 int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
 int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
 				 int port, uint8_t *data);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7c9348a..91fc64c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev)
 	else
 		phy_set_max_speed(phy, SPEED_100);
 
-	phy->advertising = phy->supported;
+	linkmode_copy(phy->advertising, phy->supported);
 
 	greth->link = 0;
 	greth->speed = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 151bdb6..128cd64 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
 
 static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
 	unsigned int phy_id = phy_data->phydev->phy_id;
 
@@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
 	phy_write(phy_data->phydev, 0x04, 0x0d01);
 	phy_write(phy_data->phydev, 0x00, 0x9140);
 
-	phy_data->phydev->supported = PHY_10BT_FEATURES |
-				      PHY_100BT_FEATURES |
-				      PHY_1000BT_FEATURES;
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       supported);
+	linkmode_set_bit_array(phy_gbit_features_array,
+			       ARRAY_SIZE(phy_gbit_features_array),
+			       supported);
+
+	linkmode_copy(phy_data->phydev->supported, supported);
+
 	phy_support_asym_pause(phy_data->phydev);
 
 	netif_dbg(pdata, drv, pdata->netdev,
@@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
 
 static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
 	struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
 	unsigned int phy_id = phy_data->phydev->phy_id;
@@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
 	reg = phy_read(phy_data->phydev, 0x00);
 	phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
 
-	phy_data->phydev->supported = (PHY_10BT_FEATURES |
-				       PHY_100BT_FEATURES |
-				       PHY_1000BT_FEATURES);
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       supported);
+	linkmode_set_bit_array(phy_gbit_features_array,
+			       ARRAY_SIZE(phy_gbit_features_array),
+			       supported);
+	linkmode_copy(phy_data->phydev->supported, supported);
 	phy_support_asym_pause(phy_data->phydev);
 
 	netif_dbg(pdata, drv, pdata->netdev,
@@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
 	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
 	struct phy_device *phydev;
-	u32 advertising;
 	int ret;
 
 	/* If we already have a PHY, just return */
@@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
 
 	xgbe_phy_external_phy_quirks(pdata);
 
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						lks->link_modes.advertising);
-	phydev->advertising &= advertising;
+	linkmode_and(phydev->advertising, phydev->advertising,
+		     lks->link_modes.advertising);
 
 	phy_start_aneg(phy_data->phydev);
 
@@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
 	if (!phy_data->phydev)
 		return;
 
-	lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+	lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
 
 	if (phy_data->phydev->pause) {
 		XGBE_SET_LP_ADV(lks, Pause);
@@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
 {
 	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
-	u32 advertising;
 	int ret;
 
 	ret = xgbe_phy_find_phy_device(pdata);
@@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
 	if (!phy_data->phydev)
 		return 0;
 
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						lks->link_modes.advertising);
-
 	phy_data->phydev->autoneg = pdata->phy.autoneg;
-	phy_data->phydev->advertising = phy_data->phydev->supported &
-					advertising;
+	linkmode_and(phy_data->phydev->advertising,
+		     phy_data->phydev->supported,
+		     lks->link_modes.advertising);
 
 	if (pdata->phy.autoneg != AUTONEG_ENABLE) {
 		phy_data->phydev->speed = pdata->phy.speed;
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index f5fe3bb..53529cd 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev)
 
 int xge_mdio_config(struct net_device *ndev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct xge_pdata *pdata = netdev_priv(ndev);
 	struct device *dev = &pdata->pdev->dev;
 	struct mii_bus *mdio_bus;
@@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev)
 		goto err;
 	}
 
-	phydev->supported &= ~(SUPPORTED_10baseT_Half |
-			       SUPPORTED_10baseT_Full |
-			       SUPPORTED_100baseT_Half |
-			       SUPPORTED_100baseT_Full |
-			       SUPPORTED_1000baseT_Half |
-			       SUPPORTED_AUI |
-			       SUPPORTED_MII |
-			       SUPPORTED_FIBRE |
-			       SUPPORTED_BNC);
-	phydev->advertising = phydev->supported;
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
+
+	linkmode_andnot(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
 	pdata->phy_speed = SPEED_UNKNOWN;
 
 	return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8..4556630 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@
 	aq_ring.o \
 	aq_hw_utils.o \
 	aq_ethtool.o \
+	aq_filters.o \
 	hw_atl/hw_atl_a0.o \
 	hw_atl/hw_atl_b0.o \
 	hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578..6b6d172 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/pci.h>
-
+#include <linux/if_vlan.h>
 #include "ver.h"
 #include "aq_cfg.h"
 #include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1da..a5fd716 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
 #include "aq_ethtool.h"
 #include "aq_nic.h"
 #include "aq_vec.h"
+#include "aq_filters.h"
 
 static void aq_ethtool_get_regs(struct net_device *ndev,
 				struct ethtool_regs *regs, void *p)
@@ -213,7 +214,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
 	case ETHTOOL_GRXRINGS:
 		cmd->data = cfg->vecs;
 		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		err = aq_get_rxnfc_rule(aq_nic, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
 
+	return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+				struct ethtool_rxnfc *cmd)
+{
+	int err = 0;
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		err = aq_add_rxnfc_rule(aq_nic, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		err = aq_del_rxnfc_rule(aq_nic, cmd);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -520,6 +550,7 @@ const struct ethtool_ops aq_ethtool_ops = {
 	.get_rxfh_key_size   = aq_ethtool_get_rss_key_size,
 	.get_rxfh            = aq_ethtool_get_rss,
 	.get_rxnfc           = aq_ethtool_get_rxnfc,
+	.set_rxnfc           = aq_ethtool_set_rxnfc,
 	.get_sset_count      = aq_ethtool_get_sset_count,
 	.get_ethtool_stats   = aq_ethtool_stats,
 	.get_link_ksettings  = aq_ethtool_get_link_ksettings,
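For context on the new .set_rxnfc hook, here is a sketch (not driver code; every field value is illustrative) of the ethtool_rxnfc request that aq_ethtool_set_rxnfc() receives when user space installs a VLAN-id steering rule with ETHTOOL_SRXCLSRLINS:

#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/string.h>

/* Illustrative only: a VLAN-id rule steering VLAN 100 to RX queue 2. */
static void example_fill_vlan_rule(struct ethtool_rxnfc *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = ETHTOOL_SRXCLSRLINS;
	cmd->fs.flow_type = ETHER_FLOW | FLOW_EXT;	/* VLAN match lives in the EXT fields */
	cmd->fs.h_ext.vlan_tci = cpu_to_be16(100);	/* match VLAN id 100 */
	cmd->fs.m_ext.vlan_tci = cpu_to_be16(VLAN_VID_MASK); /* full-VID mask */
	cmd->fs.location = 0;				/* VLAN rules use locations 0..15 */
	cmd->fs.ring_cookie = 2;			/* deliver matches to RX queue 2 */
}

The full-VID mask in m_ext.vlan_tci is what later routes the rule into the VLAN filter bank in aq_filters.c (see aq_check_filter() and aq_add_del_rule() below).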
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 0000000..18bc035
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+	if (fsp->flow_type & FLOW_MAC_EXT)
+		return false;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		return true;
+	case IP_USER_FLOW:
+		switch (fsp->h_u.usr_ip4_spec.proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+		case IPPROTO_SCTP:
+		case IPPROTO_IP:
+			return true;
+		default:
+			return false;
+		}
+	case IPV6_USER_FLOW:
+		switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+		case IPPROTO_SCTP:
+		case IPPROTO_IP:
+			return true;
+		default:
+			return false;
+		}
+	default:
+		return false;
+	}
+
+	return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+		struct ethtool_rx_flow_spec *fsp2)
+{
+	if (fsp1->flow_type != fsp2->flow_type ||
+	    memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+	    memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+	    memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+	    memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+		return false;
+
+	return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+		       struct ethtool_rx_flow_spec *fsp)
+{
+	struct aq_rx_filter *rule;
+	struct hlist_node *aq_node2;
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		if (rule->aq_fsp.location == fsp->location)
+			continue;
+		if (aq_match_filter(&rule->aq_fsp, fsp)) {
+			netdev_err(aq_nic->ndev,
+				   "ethtool: This filter is already set\n");
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+				  struct aq_hw_rx_fltrs_s *rx_fltrs,
+				  struct ethtool_rx_flow_spec *fsp)
+{
+	if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+	    fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: location must be in range [%d, %d]",
+			   AQ_RX_FIRST_LOC_FL3L4,
+			   AQ_RX_LAST_LOC_FL3L4);
+		return -EINVAL;
+	}
+	if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+		rx_fltrs->fl3l4.is_ipv6 = false;
+		netdev_err(aq_nic->ndev,
+			   "ethtool: mixing ipv4 and ipv6 is not allowed");
+		return -EINVAL;
+	} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+		rx_fltrs->fl3l4.is_ipv6 = true;
+		netdev_err(aq_nic->ndev,
+			   "ethtool: mixing ipv4 and ipv6 is not allowed");
+		return -EINVAL;
+	} else if (rx_fltrs->fl3l4.is_ipv6		      &&
+		   fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+		   fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: The specified location for ipv6 must be %d or %d",
+			   AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+		     struct aq_hw_rx_fltrs_s *rx_fltrs,
+		     struct ethtool_rx_flow_spec *fsp)
+{
+	if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+	    fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: location must be in range [%d, %d]",
+			   AQ_RX_FIRST_LOC_FETHERT,
+			   AQ_RX_LAST_LOC_FETHERT);
+		return -EINVAL;
+	}
+
+	if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+	    fsp->m_u.ether_spec.h_proto == 0U) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: proto (ether_type) parameter must be specified");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+		       struct aq_hw_rx_fltrs_s *rx_fltrs,
+		       struct ethtool_rx_flow_spec *fsp)
+{
+	if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+	    fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: location must be in range [%d, %d]",
+			   AQ_RX_FIRST_LOC_FVLANID,
+			   AQ_RX_LAST_LOC_FVLANID);
+		return -EINVAL;
+	}
+
+	if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+		       aq_nic->active_vlans))) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: unknown vlan-id specified");
+		return -EINVAL;
+	}
+
+	if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: queue number must be in range [0, %d]",
+			   aq_nic->aq_nic_cfg.num_rss_queues - 1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+		struct ethtool_rx_flow_spec *fsp)
+{
+	int err = 0;
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+	if (fsp->flow_type & FLOW_EXT) {
+		if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+			err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+		} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+		} else {
+			netdev_err(aq_nic->ndev,
+				   "ethtool: invalid vlan mask 0x%x specified",
+				   be16_to_cpu(fsp->m_ext.vlan_tci));
+			err = -EINVAL;
+		}
+	} else {
+		switch (fsp->flow_type & ~FLOW_EXT) {
+		case ETHER_FLOW:
+			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+			break;
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+		case SCTP_V4_FLOW:
+		case IPV4_FLOW:
+		case IP_USER_FLOW:
+			rx_fltrs->fl3l4.is_ipv6 = false;
+			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+			break;
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+		case SCTP_V6_FLOW:
+		case IPV6_FLOW:
+		case IPV6_USER_FLOW:
+			rx_fltrs->fl3l4.is_ipv6 = true;
+			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+			break;
+		default:
+			netdev_err(aq_nic->ndev,
+				   "ethtool: unknown flow-type specified");
+			err = -EINVAL;
+		}
+	}
+
+	return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+		       struct ethtool_rx_flow_spec *fsp)
+{
+	bool rule_is_not_support = false;
+
+	if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: Please enable RX flow classification first:\n"
+			   "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+		rule_is_not_support = true;
+	} else if (!aq_rule_is_approve(fsp)) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: The specified flow type is not supported\n");
+		rule_is_not_support = true;
+	} else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+		   (fsp->h_u.tcp_ip4_spec.tos ||
+		    fsp->h_u.tcp_ip6_spec.tclass)) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: The specified tos/tclass is not supported\n");
+		rule_is_not_support = true;
+	} else if (fsp->flow_type & FLOW_MAC_EXT) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: MAC_EXT is not supported");
+		rule_is_not_support = true;
+	}
+
+	return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+		       struct ethtool_rx_flow_spec *fsp)
+{
+	bool rule_is_not_correct = false;
+
+	if (!aq_nic) {
+		rule_is_not_correct = true;
+	} else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+		netdev_err(aq_nic->ndev,
+			   "ethtool: The specified rule number %u is invalid\n",
+			   fsp->location);
+		rule_is_not_correct = true;
+	} else if (aq_check_filter(aq_nic, fsp)) {
+		rule_is_not_correct = true;
+	} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+		if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+			netdev_err(aq_nic->ndev,
+				   "ethtool: The specified action is invalid.\n"
+				   "The maximum allowable action value is %u.\n",
+				   aq_nic->aq_nic_cfg.num_rss_queues - 1);
+			rule_is_not_correct = true;
+		}
+	}
+
+	return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+	      struct ethtool_rx_flow_spec *fsp)
+{
+	int err = 0;
+
+	if (aq_rule_is_not_correct(aq_nic, fsp))
+		err = -EINVAL;
+	else if (aq_rule_is_not_support(aq_nic, fsp))
+		err = -EOPNOTSUPP;
+	else if (aq_rule_already_exists(aq_nic, fsp))
+		err = -EEXIST;
+
+	return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+			    struct aq_rx_filter *aq_rx_fltr,
+			    struct aq_rx_filter_l2 *data, bool add)
+{
+	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+	memset(data, 0, sizeof(*data));
+
+	data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+	if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+		data->queue = fsp->ring_cookie;
+	else
+		data->queue = -1;
+
+	data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+	data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+				 == VLAN_PRIO_MASK;
+	data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+			       & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+			     struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+	struct aq_rx_filter_l2 data;
+	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+	aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+	if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+		return -EOPNOTSUPP;
+	if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+		return -EOPNOTSUPP;
+
+	if (add)
+		return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+	else
+		return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+	int i;
+
+	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+		if (aq_vlans[i].enable &&
+		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+		    aq_vlans[i].vlan_id == vlan) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Function rebuilds array of vlan filters so that filters with assigned
+ * queue have a precedence over just vlans on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+			     unsigned long *active_vlans,
+			     struct aq_rx_filter_vlan *aq_vlans)
+{
+	bool vlan_busy = false;
+	int vlan = -1;
+	int i;
+
+	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+		if (aq_vlans[i].enable &&
+		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+			continue;
+		do {
+			vlan = find_next_bit(active_vlans,
+					     VLAN_N_VID,
+					     vlan + 1);
+			if (vlan == VLAN_N_VID) {
+				aq_vlans[i].enable = 0U;
+				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+				aq_vlans[i].vlan_id = 0;
+				continue;
+			}
+
+			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+			if (!vlan_busy) {
+				aq_vlans[i].enable = 1U;
+				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+				aq_vlans[i].vlan_id = vlan;
+			}
+		} while (vlan_busy && vlan != VLAN_N_VID);
+	}
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+			     struct aq_rx_filter *aq_rx_fltr,
+			     struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+	int i;
+
+	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+	if (!add)
+		return 0;
+
+	/* remove vlan if it was in table without queue assignment */
+	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+		if (aq_vlans[i].vlan_id ==
+		   (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+			aq_vlans[i].enable = false;
+		}
+	}
+
+	aq_vlans[location].location = location;
+	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+				     & VLAN_VID_MASK;
+	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+	aq_vlans[location].enable = 1U;
+
+	return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct aq_rx_filter *rule = NULL;
+	struct hlist_node *aq_node2;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+			break;
+	}
+	if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+		struct ethtool_rxnfc cmd;
+
+		cmd.fs.location = rule->aq_fsp.location;
+		return aq_del_rxnfc_rule(aq_nic, &cmd);
+	}
+
+	return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+			    struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+		return -EOPNOTSUPP;
+
+	aq_set_data_fvlan(aq_nic,
+			  aq_rx_fltr,
+			  aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+			  add);
+
+	return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+			     struct aq_rx_filter *aq_rx_fltr,
+			     struct aq_rx_filter_l3l4 *data, bool add)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+	memset(data, 0, sizeof(*data));
+
+	data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+	data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+	if (!add) {
+		if (!data->is_ipv6)
+			rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+		else
+			rx_fltrs->fl3l4.active_ipv6 &=
+				~BIT((data->location) / 4);
+
+		return 0;
+	}
+
+	data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+	switch (fsp->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+		break;
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		data->cmd |= HW_ATL_RX_UDP;
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+		break;
+	case SCTP_V4_FLOW:
+	case SCTP_V6_FLOW:
+		data->cmd |= HW_ATL_RX_SCTP;
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+		break;
+	default:
+		break;
+	}
+
+	if (!data->is_ipv6) {
+		data->ip_src[0] =
+			ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+		data->ip_dst[0] =
+			ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+		rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+	} else {
+		int i;
+
+		rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+			data->ip_dst[i] =
+				ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+			data->ip_src[i] =
+				ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+		}
+		data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+	}
+	if (fsp->flow_type != IP_USER_FLOW &&
+	    fsp->flow_type != IPV6_USER_FLOW) {
+		if (!data->is_ipv6) {
+			data->p_dst =
+				ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+			data->p_src =
+				ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+		} else {
+			data->p_dst =
+				ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+			data->p_src =
+				ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+		}
+	}
+	if (data->ip_src[0] && !data->is_ipv6)
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+	if (data->ip_dst[0] && !data->is_ipv6)
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+	if (data->p_dst)
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+	if (data->p_src)
+		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+	if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+		data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+		data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+		data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+	} else {
+		data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+	}
+
+	return 0;
+}
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+			const struct aq_hw_ops *aq_hw_ops,
+			struct aq_rx_filter_l3l4 *data)
+{
+	if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+		return -EOPNOTSUPP;
+
+	return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+			    struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+	struct aq_rx_filter_l3l4 data;
+
+	if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+		     aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4  ||
+		     aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+		return -EINVAL;
+
+	return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+			   struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+	int err = -EINVAL;
+
+	if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+		if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+		    == VLAN_VID_MASK) {
+			aq_rx_fltr->type = aq_rx_filter_vlan;
+			err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+		} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+			== VLAN_PRIO_MASK) {
+			aq_rx_fltr->type = aq_rx_filter_ethertype;
+			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+		}
+	} else {
+		switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+		case ETHER_FLOW:
+			aq_rx_fltr->type = aq_rx_filter_ethertype;
+			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+			break;
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+		case SCTP_V4_FLOW:
+		case IP_USER_FLOW:
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+		case SCTP_V6_FLOW:
+		case IPV6_USER_FLOW:
+			aq_rx_fltr->type = aq_rx_filter_l3l4;
+			err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+			break;
+		default:
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+				   struct aq_rx_filter *aq_rx_fltr, u16 index,
+				   struct ethtool_rxnfc *cmd)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct aq_rx_filter *rule = NULL, *parent = NULL;
+	struct hlist_node *aq_node2;
+	int err = -EINVAL;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		if (rule->aq_fsp.location >= index)
+			break;
+		parent = rule;
+	}
+
+	if (rule && rule->aq_fsp.location == index) {
+		err = aq_add_del_rule(aq_nic, rule, false);
+		hlist_del(&rule->aq_node);
+		kfree(rule);
+		--rx_fltrs->active_filters;
+	}
+
+	if (unlikely(!aq_rx_fltr))
+		return err;
+
+	INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+	if (parent)
+		hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+	else
+		hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+	++rx_fltrs->active_filters;
+
+	return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+	return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+	return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct aq_rx_filter *aq_rx_fltr;
+	int err = 0;
+
+	err = aq_check_rule(aq_nic, fsp);
+	if (err)
+		goto err_exit;
+
+	aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+	if (unlikely(!aq_rx_fltr)) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+	err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+	if (unlikely(err))
+		goto err_free;
+
+	err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+	if (unlikely(err)) {
+		hlist_del(&aq_rx_fltr->aq_node);
+		--rx_fltrs->active_filters;
+		goto err_free;
+	}
+
+	return 0;
+
+err_free:
+	kfree(aq_rx_fltr);
+err_exit:
+	return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct aq_rx_filter *rule = NULL;
+	struct hlist_node *aq_node2;
+	int err = -EINVAL;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		if (rule->aq_fsp.location == cmd->fs.location)
+			break;
+	}
+
+	if (rule && rule->aq_fsp.location == cmd->fs.location) {
+		err = aq_add_del_rule(aq_nic, rule, false);
+		hlist_del(&rule->aq_node);
+		kfree(rule);
+		--rx_fltrs->active_filters;
+	}
+	return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct ethtool_rx_flow_spec *fsp =
+			(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct aq_rx_filter *rule = NULL;
+	struct hlist_node *aq_node2;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node)
+		if (fsp->location <= rule->aq_fsp.location)
+			break;
+
+	if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+		return -EINVAL;
+
+	memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+	return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+			   u32 *rule_locs)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct hlist_node *aq_node2;
+	struct aq_rx_filter *rule;
+	int count = 0;
+
+	cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		if (unlikely(count == cmd->rule_cnt))
+			return -EMSGSIZE;
+
+		rule_locs[count++] = rule->aq_fsp.location;
+	}
+
+	cmd->rule_cnt = count;
+
+	return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct hlist_node *aq_node2;
+	struct aq_rx_filter *rule;
+	int err = 0;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		err = aq_add_del_rule(aq_nic, rule, false);
+		if (err)
+			goto err_exit;
+		hlist_del(&rule->aq_node);
+		kfree(rule);
+		--rx_fltrs->active_filters;
+	}
+
+err_exit:
+	return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+	struct hlist_node *aq_node2;
+	struct aq_rx_filter *rule;
+	int err = 0;
+
+	hlist_for_each_entry_safe(rule, aq_node2,
+				  &rx_fltrs->filter_list, aq_node) {
+		err = aq_add_del_rule(aq_nic, rule, true);
+		if (err)
+			goto err_exit;
+	}
+
+err_exit:
+	return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+	int hweight = 0;
+	int err = 0;
+	int i;
+
+	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+		return -EOPNOTSUPP;
+	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+		return -EOPNOTSUPP;
+
+	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+		for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+			hweight += hweight_long(aq_nic->active_vlans[i]);
+
+		err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+		if (err)
+			return err;
+	}
+
+	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+					   );
+	if (err)
+		return err;
+
+	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+		if (hweight < AQ_VLAN_MAX_FILTERS)
+			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+		/* otherwise stay in promiscuous mode */
+	}
+
+	return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+	int err = 0;
+
+	memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+		return -EOPNOTSUPP;
+	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+		return -EOPNOTSUPP;
+
+	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+	if (err)
+		return err;
+	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+					   );
+	return err;
+}
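One constraint enforced by aq_check_approve_fl3l4() above deserves a concrete restatement: an IPv6 L3/L4 filter occupies HW_ATL_RX_CNT_REG_ADDR_IPV6 (four) consecutive address registers, so the eight L3/L4 slots (ethtool locations 32..39) can carry at most two IPv6 rules, anchored at locations 32 and 36. A hedged sketch of that check, with a made-up helper name and assuming the defines from aq_hw.h and hw_atl/hw_atl_utils.h are in scope:

/* Illustrative only: which ethtool locations may hold an IPv6 L3/L4 rule. */
static bool example_ipv6_location_is_valid(u32 location)
{
	u32 reg = location - AQ_RX_FIRST_LOC_FL3L4;	/* register index 0..7 */

	return location >= AQ_RX_FIRST_LOC_FL3L4 &&
	       location <= AQ_RX_LAST_LOC_FL3L4 &&
	       (reg % HW_ATL_RX_CNT_REG_ADDR_IPV6) == 0;	/* i.e. location 32 or 36 */
}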
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 0000000..c6a08c6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+	aq_rx_filter_ethertype,
+	aq_rx_filter_vlan,
+	aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+	struct hlist_node aq_node;
+	enum aq_rx_filter_type type;
+	struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+			   u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da..81aab73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
 #include "aq_rss.h"
 #include "hw_atl/hw_atl_utils.h"
 
+#define AQ_RX_FIRST_LOC_FVLANID     0U
+#define AQ_RX_LAST_LOC_FVLANID	   15U
+#define AQ_RX_FIRST_LOC_FETHERT    16U
+#define AQ_RX_LAST_LOC_FETHERT	   31U
+#define AQ_RX_FIRST_LOC_FL3L4	   32U
+#define AQ_RX_LAST_LOC_FL3L4	   39U
+#define AQ_RX_MAX_RXNFC_LOC	   AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS   \
+			(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED   0xFFU
+
 /* NIC H/W capabilities */
 struct aq_hw_caps_s {
 	u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
 struct aq_ring_s;
 struct aq_ring_param_s;
 struct sk_buff;
+struct aq_rx_filter_l3l4;
 
 struct aq_hw_ops {
 
@@ -183,6 +195,23 @@ struct aq_hw_ops {
 	int (*hw_packet_filter_set)(struct aq_hw_s *self,
 				    unsigned int packet_filter);
 
+	int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+				  struct aq_rx_filter_l3l4 *data);
+
+	int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+				    struct aq_rx_filter_l3l4 *data);
+
+	int (*hw_filter_l2_set)(struct aq_hw_s *self,
+				struct aq_rx_filter_l2 *data);
+
+	int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+				  struct aq_rx_filter_l2 *data);
+
+	int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+				  struct aq_rx_filter_vlan *aq_vlans);
+
+	int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
 	int (*hw_multicast_list_set)(struct aq_hw_s *self,
 				     u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
 				     [ETH_ALEN],
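A non-normative sketch of how the rule locations defined above split into the three hardware filter banks (the helper name is made up):

/* Illustrative only: map an ethtool rule location onto its filter bank. */
static const char *example_loc_to_bank(u32 loc)
{
	if (loc <= AQ_RX_LAST_LOC_FVLANID)	/* 0..15: 16 VLAN-id filters */
		return "vlan-id";
	if (loc <= AQ_RX_LAST_LOC_FETHERT)	/* 16..31: 16 ethertype filters */
		return "ethertype";
	if (loc <= AQ_RX_LAST_LOC_FL3L4)	/* 32..39: 8 L3/L4 filters */
		return "l3l4";
	return "invalid";			/* beyond AQ_RX_MAX_RXNFC_LOC */
}

With these ranges, AQ_VLAN_MAX_FILTERS evaluates to 16 and AQ_RX_MAX_RXNFC_LOC to 39.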
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef..2a11c1e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
 #include "aq_nic.h"
 #include "aq_pci_func.h"
 #include "aq_ethtool.h"
+#include "aq_filters.h"
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
 	err = aq_nic_init(aq_nic);
 	if (err < 0)
 		goto err_exit;
+
+	err = aq_reapply_rxnfc_all_rules(aq_nic);
+	if (err < 0)
+		goto err_exit;
+
 	err = aq_nic_start(aq_nic);
 	if (err < 0)
 		goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
 	bool is_lro = false;
 	int err = 0;
 
+	if (!(features & NETIF_F_NTUPLE)) {
+		if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+			err = aq_clear_rxnfc_all_rules(aq_nic);
+			if (unlikely(err))
+				goto err_exit;
+		}
+	}
+	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+		if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+			err = aq_filters_vlan_offload_off(aq_nic);
+			if (unlikely(err))
+				goto err_exit;
+		}
+	}
+
 	aq_cfg->features = features;
 
 	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
 		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
 							aq_cfg);
 
+err_exit:
 	return err;
 }
 
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 	aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+				  u16 vid)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+		return -EOPNOTSUPP;
+
+	set_bit(vid, aq_nic->active_vlans);
+
+	return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+				   u16 vid)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+		return -EOPNOTSUPP;
+
+	clear_bit(vid, aq_nic->active_vlans);
+
+	if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
+		return aq_filters_vlans_update(aq_nic);
+
+	return 0;
+}
+
 static const struct net_device_ops aq_ndev_ops = {
 	.ndo_open = aq_ndev_open,
 	.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
 	.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
 	.ndo_change_mtu = aq_ndev_change_mtu,
 	.ndo_set_mac_address = aq_ndev_set_mac_address,
-	.ndo_set_features = aq_ndev_set_features
+	.ndo_set_features = aq_ndev_set_features,
+	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc09..279ea58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
 
 	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
 
-	cfg->vlan_id = 0U;
-
 	aq_nic_rss_init(self, cfg->num_rss_queues);
 
 	/*descriptors */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a..8e34c1e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
 	u32 mtu;
 	u32 flow_control;
 	u32 link_speed_msk;
-	u32 vlan_id;
 	u32 wol;
 	u16 is_mc_list_enabled;
 	u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
 #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
 	((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
 
+struct aq_hw_rx_fl2 {
+	struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+	u8   active_ipv4;
+	u8   active_ipv6:2;
+	u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+	struct hlist_head     filter_list;
+	u16                   active_filters;
+	struct aq_hw_rx_fl2   fl2;
+	struct aq_hw_rx_fl3l4 fl3l4;
+};
+
 struct aq_nic_s {
 	atomic_t flags;
 	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
 		u32 count;
 		u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
 	} mc_list;
+	/* Bitmask of currently assigned VLANs from Linux */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
 	struct pci_dev *pdev;
 	unsigned int msix_entry_mask;
 	u32 irqvecs;
+	struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
 };
 
 static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8..c8b44cd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
 #include "aq_pci_func.h"
 #include "hw_atl/hw_atl_a0.h"
 #include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
 
 static const struct pci_device_id aq_pci_tbl[] = {
 	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
 	struct aq_nic_s *self = pci_get_drvdata(pdev);
 
 	if (self->ndev) {
+		aq_clear_rxnfc_all_rules(self);
 		if (self->ndev->reg_state == NETREG_REGISTERED)
 			unregister_netdev(self->ndev);
 		aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f..6af7d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -41,7 +41,9 @@
 			NETIF_F_RXHASH |  \
 			NETIF_F_SG |      \
 			NETIF_F_TSO |     \
-			NETIF_F_LRO,      \
+			NETIF_F_LRO |     \
+			NETIF_F_NTUPLE |  \
+			NETIF_F_HW_VLAN_CTAG_FILTER, \
 	.hw_priv_flags = IFF_UNICAST_FLT, \
 	.flow_control = true,		  \
 	.mtu = HW_ATL_B0_MTU_JUMBO,	  \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
 	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
 	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
 
-	if (cfg->vlan_id) {
-		hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
-		hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
-		hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
 
-		hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
-		hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
-		hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
-		hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
-		hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
-	} else {
-		hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
-	}
+	/* Always accept untagged packets */
+	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+	hw_atl_rpf_vlan_untagged_act_set(self, 1U);
 
 	/* Rx Interrupts */
 	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
 	return aq_hw_err_from_flags(self);
 }
 
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+				    struct aq_rx_filter_l3l4 *data)
+{
+	u8 location = data->location;
+
+	if (!data->is_ipv6) {
+		hw_atl_rpfl3l4_cmd_clear(self, location);
+		hw_atl_rpf_l4_spd_set(self, 0U, location);
+		hw_atl_rpf_l4_dpd_set(self, 0U, location);
+		hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+		hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+	} else {
+		int i;
+
+		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+			hw_atl_rpfl3l4_cmd_clear(self, location + i);
+			hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+			hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+		}
+		hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+		hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+	}
+
+	return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+				  struct aq_rx_filter_l3l4 *data)
+{
+	u8 location = data->location;
+
+	hw_atl_b0_hw_fl3l4_clear(self, data);
+
+	if (data->cmd) {
+		if (!data->is_ipv6) {
+			hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+							  location,
+							  data->ip_dst[0]);
+			hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+							 location,
+							 data->ip_src[0]);
+		} else {
+			hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+							  location,
+							  data->ip_dst);
+			hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+							 location,
+							 data->ip_src);
+		}
+	}
+	hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+	hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+	hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+	return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+				struct aq_rx_filter_l2 *data)
+{
+	hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+	hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+	hw_atl_rpf_etht_user_priority_en_set(self,
+					     !!data->user_priority_en,
+					     data->location);
+	if (data->user_priority_en)
+		hw_atl_rpf_etht_user_priority_set(self,
+						  data->user_priority,
+						  data->location);
+
+	if (data->queue < 0) {
+		hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+		hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+	} else {
+		hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+		hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+		hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+	}
+
+	return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+				  struct aq_rx_filter_l2 *data)
+{
+	hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+	hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+	hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+	return aq_hw_err_from_flags(self);
+}
+
+/**
+ * hw_atl_b0_hw_vlan_set() - set the VLAN filter table
+ * @self: hardware context
+ * @aq_vlans: VLAN filter configuration (AQ_VLAN_MAX_FILTERS entries)
+ *
+ * Configures the VLAN filter table to accept (and optionally assign a queue
+ * to) traffic for the given VLAN ids. Call it while VLAN promiscuous mode is
+ * enabled so that traffic is not lost during the update.
+ * Return: 0 on success, a negative value on error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+				 struct aq_rx_filter_vlan *aq_vlans)
+{
+	int i;
+
+	for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+		if (aq_vlans[i].enable) {
+			hw_atl_rpf_vlan_id_flr_set(self,
+						   aq_vlans[i].vlan_id,
+						   i);
+			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+			if (aq_vlans[i].queue != 0xFF) {
+				hw_atl_rpf_vlan_rxq_flr_set(self,
+							    aq_vlans[i].queue,
+							    i);
+				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+			}
+		}
+	}
+
+	return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+	/* set promiscuous mode in case of disabling the VLAN filter */
+	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+	return aq_hw_err_from_flags(self);
+}
+
 const struct aq_hw_ops hw_atl_ops_b0 = {
 	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
 	.hw_init              = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
 	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
 	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
 	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
+	.hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
+	.hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
+	.hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
+	.hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
+	.hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
 	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
 	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
 	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
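The kernel-doc note on hw_atl_b0_hw_vlan_set() implies a specific update order, which aq_filters_vlans_update() in aq_filters.c follows. A hedged sketch of that order using the static helpers defined above (the wrapper name is made up; the real caller also stays in VLAN promiscuous mode when more than AQ_VLAN_MAX_FILTERS VLANs are active):

/* Illustrative only: rewrite the VLAN table without dropping traffic. */
static int example_vlan_table_rewrite(struct aq_hw_s *self,
				      struct aq_rx_filter_vlan *aq_vlans)
{
	int err;

	err = hw_atl_b0_hw_vlan_ctrl(self, false);	/* VLAN promiscuous mode on */
	if (err)
		return err;

	err = hw_atl_b0_hw_vlan_set(self, aq_vlans);	/* reprogram all 16 entries */
	if (err)
		return err;

	return hw_atl_b0_hw_vlan_ctrl(self, true);	/* filtering back on */
}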
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f..939f77e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 			    vlan_id_flr);
 }
 
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+				    u32 filter)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+			    HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+			    HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+			    vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+				 u32 filter)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+			    HW_ATL_RPF_VL_RXQ_F_MSK,
+			    HW_ATL_RPF_VL_RXQ_F_SHIFT,
+			    vlan_rxq);
+}
+
 void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
 				u32 filter)
 {
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
 			    HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
 }
 
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+			    HW_ATL_RPF_L4_SPD_MSK,
+			    HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+			    HW_ATL_RPF_L4_DPD_MSK,
+			    HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
 /* RPO: rx packet offload */
 void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
 					      u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
 			    HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
 			    up_force_intr);
 }
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		aq_hw_write_reg(aq_hw,
+				HW_ATL_RPF_L3_DSTA_ADR(location + i),
+				0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		aq_hw_write_reg(aq_hw,
+				HW_ATL_RPF_L3_SRCA_ADR(location + i),
+				0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				       u32 ipv4_dest)
+{
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+			ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				      u32 ipv4_src)
+{
+	aq_hw_write_reg(aq_hw,
+			HW_ATL_RPF_L3_SRCA_ADR(location),
+			ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				      u32 *ipv6_src)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		aq_hw_write_reg(aq_hw,
+				HW_ATL_RPF_L3_SRCA_ADR(location + i),
+				ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				       u32 *ipv6_dest)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		aq_hw_write_reg(aq_hw,
+				HW_ATL_RPF_L3_DSTA_ADR(location + i),
+				ipv6_dest[i]);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f2399..03c570d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
 void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 				u32 filter);
 
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+				    u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+				 u32 filter);
+
 /* set ethertype filter enable */
 void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
 				u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
 /* set ethertype filter */
 void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
 
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
 /* rpo */
 
 /* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
 /* set uP Force Interrupt */
 void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
 
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				       u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				      u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				      u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+				       u32 *ipv6_dest);
+
 #endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa3..8470d92d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
 /* Default value of bitfield vl_id{F}[B:0] */
 #define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
 
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq_en{F}".
  * Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
+ * PORT="pif_rpf_vl_rxq_en_i"
  */
 
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
 
 /* rx et_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
 /* default value of bitfield et_val{f}[f:0] */
 #define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
 
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
 /* rx ipv4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "ipv4_chk_en".
  * port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
 /* default value of bitfield uP Force Interrupt */
 #define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
 
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4   0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4   0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4  0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
 #endif /* HW_ATL_LLH_INTERNAL_H */
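To make the new vl_rxq{F} definitions concrete, here is a worked example (values are illustrative) of the read-modify-write that hw_atl_rpf_vlan_rxq_flr_set() performs through aq_hw_write_reg_bit() when steering VLAN filter 3 to RX queue 5; the register sits at HW_ATL_RPF_VL_RXQ_F_ADR(3), i.e. 0x5290 + 3 * 0x4 = 0x529C:

/* Illustrative only: the masking math for the vl_rxq{F} field. */
static u32 example_vl_rxq_field_update(u32 old_reg_val)
{
	u32 queue = 5;

	return (old_reg_val & HW_ATL_RPF_VL_RXQ_F_MSKN) |	/* clear bits 24:20 */
	       ((queue << HW_ATL_RPF_VL_RXQ_F_SHIFT) &		/* queue goes into bits 24:20 */
		HW_ATL_RPF_VL_RXQ_F_MSK);
}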
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 7def1cb..1af6606 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -454,8 +454,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 			       (fw.val =
 				aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
 				fw.tid), 1000U, 100U);
-		if (err < 0)
-			goto err_exit;
 
 		if (fw.len == 0xFFFFU) {
 			err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -463,8 +461,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 				goto err_exit;
 		}
 	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
-	if (err < 0)
-		goto err_exit;
 
 	if (rpc) {
 		if (fw.len) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca..48278e3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
 	u8 buf[0];
 };
 
+enum hw_atl_rx_action_with_traffic {
+	HW_ATL_RX_DISCARD,
+	HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+	u8 enable;
+	u8 location;
+	u16 vlan_id;
+	u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+	s8 queue;
+	u8 location;
+	u8 user_priority_en;
+	u8 user_priority;
+	u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+	u32 cmd;
+	u8 location;
+	u32 ip_dst[4];
+	u32 ip_src[4];
+	u16 p_dst;
+	u16 p_src;
+	u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+	HW_ATL_RX_TCP,
+	HW_ATL_RX_UDP,
+	HW_ATL_RX_SCTP,
+	HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+	HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+	HW_ATL_RX_ENABLE_QUEUE_L3L4        = BIT(23),
+	HW_ATL_RX_ENABLE_ARP_FLTR_L3       = BIT(24),
+	HW_ATL_RX_ENABLE_CMP_PROT_L4       = BIT(25),
+	HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4  = BIT(26),
+	HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4   = BIT(27),
+	HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3  = BIT(28),
+	HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3   = BIT(29),
+	HW_ATL_RX_ENABLE_L3_IPV6           = BIT(30),
+	HW_ATL_RX_ENABLE_FLTR_L3L4         = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT       8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT      16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6       4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+	((location) - AQ_RX_FIRST_LOC_FL3L4)
+
 #define HAL_ATLANTIC_UTILS_CHIP_MIPS         0x00000001U
 #define HAL_ATLANTIC_UTILS_CHIP_TPO2         0x00000002U
 #define HAL_ATLANTIC_UTILS_CHIP_RPF2         0x00000004U
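As a concrete example of how aq_set_data_fl3l4() composes its control word from the hw_atl_rx_ctrl_registers_l3l4 bits above, this sketch (the helper name and queue number are illustrative) builds cmd for a TCP/IPv4 rule that matches both addresses and both ports and steers hits to RX queue 3:

/* Illustrative only: L3/L4 control word for a fully-specified TCP/IPv4 rule. */
static u32 example_tcp4_fl3l4_cmd(void)
{
	u32 cmd = HW_ATL_RX_ENABLE_FLTR_L3L4;			/* filter slot enabled */

	cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;			/* compare L4 protocol (TCP == 0) */
	cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;		/* match IPv4 source address */
	cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;		/* match IPv4 destination address */
	cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;		/* match TCP source port */
	cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;		/* match TCP destination port */
	cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;	/* deliver to host ... */
	cmd |= 3U << HW_ATL_RX_QUEUE_FL3L4_SHIFT;		/* ... on RX queue 3 */
	cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;			/* queue field is valid */

	return cmd;
}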
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index bd277b0..4406325 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev)
 	phy_dev->autoneg = AUTONEG_ENABLE;
 	phy_dev->speed = 0;
 	phy_dev->duplex = 0;
-	phy_dev->advertising &= phy_dev->supported;
+	linkmode_and(phy_dev->advertising, phy_dev->advertising,
+		     phy_dev->supported);
 
 	priv->last_rx_bd = 0;
 
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e445ab7..f448089 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev)
 
 static int b44_register_phy_one(struct b44 *bp)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct mii_bus *mii_bus;
 	struct ssb_device *sdev = bp->sdev;
 	struct phy_device *phydev;
@@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp)
 	}
 
 	/* mask with MAC supported features */
-	phydev->supported &= (SUPPORTED_100baseT_Half |
-			      SUPPORTED_100baseT_Full |
-			      SUPPORTED_Autoneg |
-			      SUPPORTED_MII);
-	phydev->advertising = phydev->supported;
+	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	bp->old_link = 0;
 	bp->phy_addr = phydev->mdio.addr;
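The b44 hunk above is part of the tree-wide conversion of phydev->supported/advertising from u32 SUPPORTED_*/ADVERTISED_* masks to ethtool link-mode bitmaps. The recurring pattern is to build a mask on the stack and apply it with the linkmode helpers; a stand-alone sketch, with a hypothetical restrict_phy_modes() wrapper:

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Hypothetical helper: limit a PHY to 10/100 full duplex plus autoneg
 * over MII, then advertise everything that is left.
 */
static void restrict_phy_modes(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);

	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
}

Drivers that still hold a legacy u32 capability word (dpaa, hns, gianfar in later hunks) first convert it with ethtool_convert_legacy_u32_to_link_mode() before applying the same mask-and-copy step.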
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0e2d99c..4574275 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
 
 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
+	unsigned int index;
 	u32 reg;
 
 	/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
 	rxchk_writel(priv, reg, RXCHK_CONTROL);
 
+	/* Make sure we restore the correct CID index in case the HW
+	 * lost its context during a deep idle state
+	 */
+	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+		rxchk_writel(priv, priv->filters_loc[index] <<
+			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+	}
+
 	/* Clear the MagicPacket detection logic */
 	mpd_enable_set(priv, false);
 
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
 	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
 	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
 
+	priv->filters_loc[index] = nfc->fs.location;
 	set_bit(index, priv->filters);
 
 	return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
 	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
 	 */
 	clear_bit(index, priv->filters);
+	priv->filters_loc[index] = 0;
 
 	return 0;
 }
@@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
 	struct bcm_sysport_priv *priv;
 	struct net_device *slave_dev;
 	unsigned int num_tx_queues;
-	unsigned int q, start, port;
+	unsigned int q, qp, port;
 	struct net_device *dev;
 
 	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
 
 	priv->per_port_num_tx_queues = num_tx_queues;
 
-	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
-	for (q = 0; q < num_tx_queues; q++) {
-		ring = &priv->tx_rings[q + start];
+	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+	     q++) {
+		ring = &priv->tx_rings[q];
+
+		if (ring->inspect)
+			continue;
 
 		/* Just remember the mapping actual programming done
 		 * during bcm_sysport_init_tx_ring
 		 */
-		ring->switch_queue = q;
+		ring->switch_queue = qp;
 		ring->switch_port = port;
 		ring->inspect = true;
 		priv->ring_map[q + port * num_tx_queues] = ring;
+		qp++;
+	}
 
-		/* Set all queues as being used now */
-		set_bit(q + start, &priv->queue_bitmap);
+	return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+				    struct dsa_notifier_register_info *info)
+{
+	struct bcm_sysport_tx_ring *ring;
+	struct bcm_sysport_priv *priv;
+	struct net_device *slave_dev;
+	unsigned int num_tx_queues;
+	struct net_device *dev;
+	unsigned int q, port;
+
+	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+	if (priv->netdev != info->master)
+		return 0;
+
+	dev = info->master;
+
+	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+		return 0;
+
+	port = info->port_number;
+	slave_dev = info->info.dev;
+
+	num_tx_queues = slave_dev->real_num_tx_queues;
+
+	for (q = 0; q < dev->num_tx_queues; q++) {
+		ring = &priv->tx_rings[q];
+
+		if (ring->switch_port != port)
+			continue;
+
+		if (!ring->inspect)
+			continue;
+
+		ring->inspect = false;
+		priv->ring_map[q + port * num_tx_queues] = NULL;
 	}
 
 	return 0;
@@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
 static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
 				    unsigned long event, void *ptr)
 {
-	struct dsa_notifier_register_info *info;
+	int ret = NOTIFY_DONE;
 
-	if (event != DSA_PORT_REGISTER)
-		return NOTIFY_DONE;
+	switch (event) {
+	case DSA_PORT_REGISTER:
+		ret = bcm_sysport_map_queues(nb, ptr);
+		break;
+	case DSA_PORT_UNREGISTER:
+		ret = bcm_sysport_unmap_queues(nb, ptr);
+		break;
+	}
 
-	info = ptr;
-
-	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+	return notifier_from_errno(ret);
 }
 
 #define REV_FMT	"v%2x.%02x"
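The resume path above walks the saved filter bitmap and reprograms every active RXCHK entry from filters_loc[], since the block can lose its context in deep idle. A generic sketch of that save/restore idiom, assuming a hypothetical reprogram_filter() hardware write and a fixed MAX_FILTERS table size:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MAX_FILTERS	32	/* hypothetical table size */

struct filter_state {
	DECLARE_BITMAP(in_use, MAX_FILTERS);
	u32 loc[MAX_FILTERS];	/* per-entry value to reprogram */
};

/* hypothetical hardware write, standing in for rxchk_writel() */
void reprogram_filter(unsigned int index, u32 loc);

static void restore_filters(struct filter_state *st)
{
	unsigned int index;

	/* only touch entries that were programmed before the power transition */
	for_each_set_bit(index, st->in_use, MAX_FILTERS)
		reprogram_filter(index, st->loc[index]);
}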
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index a7a2308..0887e63 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
 	/* Ethtool */
 	u32			msg_enable;
 	DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+	u32			filters_loc[RXCHK_BRCM_TAG_MAX];
 
 	struct bcm_sysport_stats64	stats64;
 
@@ -795,7 +796,6 @@ struct bcm_sysport_priv {
 	/* map information between switch port queues and local queues */
 	struct notifier_block	dsa_notifier;
 	unsigned int		per_port_num_tx_queues;
-	unsigned long		queue_bitmap;
 	struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
 
 };
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d83233a..510dfc1 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 		if (realdev) {
 			dev = cnic_from_netdev(realdev);
 			if (dev) {
-				vid |= VLAN_TAG_PRESENT;
+				vid |= VLAN_CFI_MASK;	/* make non-zero */
 				cnic_rcv_netevent(dev->cnic_priv, event, vid);
 				cnic_put(dev);
 			}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090..983245c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
 		break;
 	}
 
-	return 0;
+	return ret;
 }
 
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
-	struct net_device *dev = dev_get_drvdata(d);
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int ret = 0;
-
-	if (!netif_running(dev))
-		return 0;
-
-	netif_device_detach(dev);
-
-	bcmgenet_netif_stop(dev);
-
-	if (!device_may_wakeup(d))
-		phy_suspend(dev->phydev);
-
-	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
-	if (device_may_wakeup(d) && priv->wolopts) {
-		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
-		clk_prepare_enable(priv->clk_wol);
-	} else if (priv->internal_phy) {
-		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
-	}
-
-	/* Turn off the clocks */
-	clk_disable_unprepare(priv->clk);
-
-	return ret;
-}
-
 static int bcmgenet_resume(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
@@ -3719,6 +3689,39 @@ static int bcmgenet_resume(struct device *d)
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
+
+static int bcmgenet_suspend(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!netif_running(dev))
+		return 0;
+
+	netif_device_detach(dev);
+
+	bcmgenet_netif_stop(dev);
+
+	if (!device_may_wakeup(d))
+		phy_suspend(dev->phydev);
+
+	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
+	if (device_may_wakeup(d) && priv->wolopts) {
+		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+		clk_prepare_enable(priv->clk_wol);
+	} else if (priv->internal_phy) {
+		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+	}
+
+	/* Turn off the clocks */
+	clk_disable_unprepare(priv->clk);
+
+	if (ret)
+		bcmgenet_resume(d);
+
+	return ret;
+}
 #endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
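bcmgenet_suspend() is now defined after bcmgenet_resume() so it can unwind a failed power-down by running the resume path. A minimal sketch of that rollback idiom, with hypothetical my_power_down()/my_resume() callbacks in place of the driver's own:

#include <linux/device.h>

/* hypothetical helpers standing in for the driver's power paths */
int my_power_down(struct device *d);
int my_resume(struct device *d);

static int my_suspend(struct device *d)
{
	int ret;

	ret = my_power_down(d);	/* may fail after some steps already ran */
	if (ret)
		my_resume(d);	/* unwind by running the resume path */

	return ret;
}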
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027..57582ef 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
 	}
 
 	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+	if (!(reg & MPD_EN))
+		return;	/* already powered up so skip the rest */
 	reg &= ~MPD_EN;
 	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index a6cbaca..aceb9b7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 		 * capabilities, use that knowledge to also configure the
 		 * Reverse MII interface correctly.
 		 */
-		if (dev->phydev->supported & PHY_1000BT_FEATURES)
+		if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				      dev->phydev->supported))
 			port_ctrl = PORT_MODE_EXT_RVMII_50;
 		else
 			port_ctrl = PORT_MODE_EXT_RVMII_25;
@@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
 		return ret;
 	}
 
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	/* The internal PHY has its link interrupts routed to the
 	 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 432c3b8..3b1397a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -66,11 +66,6 @@
 #include <uapi/linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
 #define BAR_0	0
 #define BAR_2	2
 
@@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp)
 		phydev->speed = tp->link_config.speed;
 		phydev->duplex = tp->link_config.duplex;
 		phydev->autoneg = tp->link_config.autoneg;
-		phydev->advertising = tp->link_config.advertising;
+		ethtool_convert_legacy_u32_to_link_mode(
+			phydev->advertising, tp->link_config.advertising);
 	}
 
 	phy_start(phydev);
@@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 		do_low_power = false;
 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
 			struct phy_device *phydev;
-			u32 phyid, advertising;
+			u32 phyid;
 
 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 
@@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 			tp->link_config.speed = phydev->speed;
 			tp->link_config.duplex = phydev->duplex;
 			tp->link_config.autoneg = phydev->autoneg;
-			tp->link_config.advertising = phydev->advertising;
+			ethtool_convert_link_mode_to_legacy_u32(
+				&tp->link_config.advertising,
+				phydev->advertising);
 
-			advertising = ADVERTISED_TP |
-				      ADVERTISED_Pause |
-				      ADVERTISED_Autoneg |
-				      ADVERTISED_10baseT_Half;
+			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
+			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+					 advertising);
+			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+					 advertising);
+			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+					 advertising);
 
 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
-				if (tg3_flag(tp, WOL_SPEED_100MB))
-					advertising |=
-						ADVERTISED_100baseT_Half |
-						ADVERTISED_100baseT_Full |
-						ADVERTISED_10baseT_Full;
-				else
-					advertising |= ADVERTISED_10baseT_Full;
+				if (tg3_flag(tp, WOL_SPEED_100MB)) {
+					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+							 advertising);
+					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+							 advertising);
+					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+							 advertising);
+				} else {
+					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+							 advertising);
+				}
 			}
 
-			phydev->advertising = advertising;
-
+			linkmode_copy(phydev->advertising, advertising);
 			phy_start_aneg(phydev);
 
 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
@@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
 }
 
 /* tp->lock must be held */
-static u64 tg3_refclk_read(struct tg3 *tp)
+static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
 {
-	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
-	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+	u64 stamp;
+
+	ptp_read_system_prets(sts);
+	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+	ptp_read_system_postts(sts);
+	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+
+	return stamp;
 }
 
 /* tp->lock must be held */
@@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }
 
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+			    struct ptp_system_timestamp *sts)
 {
 	u64 ns;
 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 
 	tg3_full_lock(tp, 0);
-	ns = tg3_refclk_read(tp);
+	ns = tg3_refclk_read(tp, sts);
 	ns += tp->ptp_adjust;
 	tg3_full_unlock(tp);
 
@@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 	.pps		= 0,
 	.adjfreq	= tg3_ptp_adjfreq,
 	.adjtime	= tg3_ptp_adjtime,
-	.gettime64	= tg3_ptp_gettime,
+	.gettimex64	= tg3_ptp_gettimex,
 	.settime64	= tg3_ptp_settime,
 	.enable		= tg3_ptp_enable,
 };
@@ -16973,32 +16985,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	return err;
 }
 
-#ifdef CONFIG_SPARC
-static int tg3_get_macaddr_sparc(struct tg3 *tp)
-{
-	struct net_device *dev = tp->dev;
-	struct pci_dev *pdev = tp->pdev;
-	struct device_node *dp = pci_device_to_OF_node(pdev);
-	const unsigned char *addr;
-	int len;
-
-	addr = of_get_property(dp, "local-mac-address", &len);
-	if (addr && len == ETH_ALEN) {
-		memcpy(dev->dev_addr, addr, ETH_ALEN);
-		return 0;
-	}
-	return -ENODEV;
-}
-
-static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
-{
-	struct net_device *dev = tp->dev;
-
-	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
-	return 0;
-}
-#endif
-
 static int tg3_get_device_address(struct tg3 *tp)
 {
 	struct net_device *dev = tp->dev;
@@ -17006,10 +16992,8 @@ static int tg3_get_device_address(struct tg3 *tp)
 	int addr_ok = 0;
 	int err;
 
-#ifdef CONFIG_SPARC
-	if (!tg3_get_macaddr_sparc(tp))
+	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
 		return 0;
-#endif
 
 	if (tg3_flag(tp, IS_SSB_CORE)) {
 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
@@ -17071,13 +17055,8 @@ static int tg3_get_device_address(struct tg3 *tp)
 		}
 	}
 
-	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC
-		if (!tg3_get_default_macaddr_sparc(tp))
-			return 0;
-#endif
+	if (!is_valid_ether_addr(&dev->dev_addr[0]))
 		return -EINVAL;
-	}
 	return 0;
 }
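tg3 moves from the .gettime64 to the new .gettimex64 PTP callback, which lets the core correlate the device clock with system time: ptp_read_system_prets()/ptp_read_system_postts() bracket only the register read that latches the counter. A minimal sketch of such a callback, assuming hypothetical read_dev_clock_lsb()/read_dev_clock_msb() accessors for a split 64-bit counter:

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

/* hypothetical 32-bit clock register reads */
u32 read_dev_clock_lsb(struct ptp_clock_info *ptp);
u32 read_dev_clock_msb(struct ptp_clock_info *ptp);

static int my_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time right before the read */
	ns = read_dev_clock_lsb(ptp);	/* this read latches the counter */
	ptp_read_system_postts(sts);	/* system time right after the read */
	ns |= (u64)read_dev_clock_msb(ptp) << 32;

	*ts = ns_to_timespec64(ns);
	return 0;
}

The callback is wired up through .gettimex64 in struct ptp_clock_info, as the tg3 hunk does with tg3_ptp_gettimex.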
 
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 6aeb104..73632b8 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -277,10 +277,6 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
 	writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
 
 	clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
-	if (!clock->ptp_clock) {
-		err = -ENODEV;
-		goto error_stop;
-	}
 	if (IS_ERR(clock->ptp_clock)) {
 		err = PTR_ERR(clock->ptp_clock);
 		goto error_stop;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b3aecf..5359c10 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	/* Set the mode of the interface, RGMII/MII. */
 	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
 		union cvmx_agl_prtx_ctl agl_prtx_ctl;
-		int rgmii_mode = (netdev->phydev->supported &
-				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+		int rgmii_mode =
+			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+					   netdev->phydev->supported) |
+			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+					   netdev->phydev->supported)) != 0;
 
 		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
 		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b16f4b3e..2d1ca92 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,7 @@ struct adapter_params {
 	bool fr_nsmr_tpte_wr_support;	  /* FW support for FR_NSMR_TPTE_WR */
 	u8 fw_caps_support;		/* 32-bit Port Capabilities */
 	bool filter2_wr_support;	/* FW support for FILTER2_WR */
+	unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */
 
 	/* MPS Buffer Group Map[per Port].  Bit i is set if buffer group i is
 	 * used by the Port
@@ -592,6 +593,13 @@ struct port_info {
 	bool ptp_enable;
 	struct sched_table *sched_tbl;
 	u32 eth_flags;
+
+	/* viid and smt fields either returned by fw
+	 * or decoded by parsing viid by driver.
+	 */
+	u8 vin;
+	u8 vivld;
+	u8 smt_idx;
 };
 
 struct dentry;
@@ -1757,7 +1765,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
-		unsigned int *rss_size);
+		unsigned int *rss_size, u8 *vivld, u8 *vin);
 int t4_free_vi(struct adapter *adap, unsigned int mbox,
 	       unsigned int pf, unsigned int vf,
 	       unsigned int viid);
@@ -1783,7 +1791,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
 		     unsigned int viid, unsigned int naddr,
 		     const u8 **addr, bool sleep_ok);
 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
-		  int idx, const u8 *addr, bool persist, bool add_smt);
+		  int idx, const u8 *addr, bool persist, u8 *smt_idx);
 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		     bool ucast, u64 vec, bool sleep_ok);
 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46..7f76ad9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -453,7 +453,7 @@ static int link_start(struct net_device *dev)
 	if (ret == 0) {
 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
 				    pi->xact_addr_filt, dev->dev_addr, true,
-				    true);
+				    &pi->smt_idx);
 		if (ret >= 0) {
 			pi->xact_addr_filt = ret;
 			ret = 0;
@@ -1585,28 +1585,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
 
 /**
- *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
- *	@chip: chip type
- *	@viid: VI id of the given port
- *
- *	Return the SMT index for this VI.
- */
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
-{
-	/* In T4/T5, SMT contains 256 SMAC entries organized in
-	 * 128 rows of 2 entries each.
-	 * In T6, SMT contains 256 SMAC entries in 256 rows.
-	 * TODO: The below code needs to be updated when we add support
-	 * for 256 VFs.
-	 */
-	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
-		return ((viid & 0x7f) << 1);
-	else
-		return (viid & 0x7f);
-}
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
-
-/**
  *	cxgb4_port_chan - get the HW channel of a port
  *	@dev: the net device for the port
  *
@@ -2280,8 +2258,6 @@ static int cxgb_up(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
 #endif
-	/* Initialize hash mac addr list*/
-	INIT_LIST_HEAD(&adap->mac_hlist);
 	return err;
 
  irq_err:
@@ -2303,6 +2279,7 @@ static void cxgb_down(struct adapter *adapter)
 
 	t4_sge_stop(adapter);
 	t4_free_sge_resources(adapter);
+
 	adapter->flags &= ~FULL_INIT_DONE;
 }
 
@@ -2863,7 +2840,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 		return -EADDRNOTAVAIL;
 
 	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
-			    pi->xact_addr_filt, addr->sa_data, true, true);
+			    pi->xact_addr_filt, addr->sa_data, true,
+			    &pi->smt_idx);
 	if (ret < 0)
 		return ret;
 
@@ -4467,6 +4445,15 @@ static int adap_init0(struct adapter *adap)
 		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
 	}
 
+	/* Check whether the FW supports returning the VIN and SMT index.
+	 * If this is not supported, the driver will derive
+	 * these values from the VIID.
+	 */
+	params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+			      1, params, val);
+	adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
+
 	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
@@ -4777,14 +4764,26 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	for_each_port(adap, i) {
-		struct port_info *p = adap2pinfo(adap, i);
+		struct port_info *pi = adap2pinfo(adap, i);
+		u8 vivld = 0, vin = 0;
 
-		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
-				  NULL, NULL);
+		ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
+				  NULL, NULL, &vivld, &vin);
 		if (ret < 0)
 			return PCI_ERS_RESULT_DISCONNECT;
-		p->viid = ret;
-		p->xact_addr_filt = -1;
+		pi->viid = ret;
+		pi->xact_addr_filt = -1;
+		/* If fw supports returning the VIN as part of FW_VI_CMD,
+		 * save the returned values.
+		 */
+		if (adap->params.viid_smt_extn_support) {
+			pi->vivld = vivld;
+			pi->vin = vin;
+		} else {
+			/* Retrieve the values from VIID */
+			pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+			pi->vin = FW_VIID_VIN_G(pi->viid);
+		}
 	}
 
 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
@@ -5621,6 +5620,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
 			      T6_STATMODE_V(0)));
 
+	/* Initialize hash mac addr list */
+	INIT_LIST_HEAD(&adapter->mac_hlist);
+
 	for_each_port(adapter, i) {
 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
 					   MAX_ETH_QSETS);
@@ -5899,6 +5901,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 static void remove_one(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
+	struct hash_mac_addr *entry, *tmp;
 
 	if (!adapter) {
 		pci_release_regions(pdev);
@@ -5948,6 +5951,12 @@ static void remove_one(struct pci_dev *pdev)
 		if (adapter->num_uld || adapter->num_ofld_uld)
 			t4_uld_mem_free(adapter);
 		free_some_resources(adapter);
+		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+					 list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+
 #if IS_ENABLED(CONFIG_IPV6)
 		t4_cleanup_clip_tbl(adapter);
 #endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 99022c0..4852feb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -495,14 +495,11 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 
 	if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
-		u32 viid = cxgb4_port_viid(dev);
-		u32 vf = FW_VIID_VIN_G(viid);
-		u32 pf = FW_VIID_PFN_G(viid);
-		u32 vld = FW_VIID_VIVLD_G(viid);
+		struct port_info *pi = (struct port_info *)netdev_priv(dev);
 
-		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
-				FT_VNID_ID_PF_V(pf) |
-				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+		ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
+				FT_VNID_ID_PF_V(adap->pf) |
+				FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
 	}
 
 	return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cb52394..4cb261d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7141,21 +7141,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 			 unsigned int cache_line_size)
 {
 	unsigned int page_shift = fls(page_size) - 1;
-	unsigned int sge_hps = page_shift - 10;
 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
 	unsigned int fl_align_log = fls(fl_align) - 1;
 
-	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
-		     HOSTPAGESIZEPF0_V(sge_hps) |
-		     HOSTPAGESIZEPF1_V(sge_hps) |
-		     HOSTPAGESIZEPF2_V(sge_hps) |
-		     HOSTPAGESIZEPF3_V(sge_hps) |
-		     HOSTPAGESIZEPF4_V(sge_hps) |
-		     HOSTPAGESIZEPF5_V(sge_hps) |
-		     HOSTPAGESIZEPF6_V(sge_hps) |
-		     HOSTPAGESIZEPF7_V(sge_hps));
-
 	if (is_t4(adap->params.chip)) {
 		t4_set_reg_field(adap, SGE_CONTROL_A,
 				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -7488,7 +7477,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  */
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
-		unsigned int *rss_size)
+		unsigned int *rss_size, u8 *vivld, u8 *vin)
 {
 	int ret;
 	struct fw_vi_cmd c;
@@ -7523,6 +7512,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 	}
 	if (rss_size)
 		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+
+	if (vivld)
+		*vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
+
+	if (vin)
+		*vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
+
 	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
 }
 
@@ -7980,7 +7976,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  *	MAC value.
  */
 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
-		  int idx, const u8 *addr, bool persist, bool add_smt)
+		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
 {
 	int ret, mode;
 	struct fw_vi_mac_cmd c;
@@ -7989,7 +7985,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
 
 	if (idx < 0)                             /* new allocation */
 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
-	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
 
 	memset(&c, 0, sizeof(c));
 	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
@@ -8006,6 +8002,23 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
 		if (ret >= max_mac_addr)
 			ret = -ENOMEM;
+		if (smt_idx) {
+			if (adap->params.viid_smt_extn_support) {
+				*smt_idx = FW_VI_MAC_CMD_SMTID_G
+						    (be32_to_cpu(c.op_to_viid));
+			} else {
+				/* In T4/T5, SMT contains 256 SMAC entries
+				 * organized in 128 rows of 2 entries each.
+				 * In T6, SMT contains 256 SMAC entries in
+				 * 256 rows.
+				 */
+				if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
+								     CHELSIO_T5)
+					*smt_idx = (viid & FW_VIID_VIN_M) << 1;
+				else
+					*smt_idx = (viid & FW_VIID_VIN_M);
+			}
+		}
 	}
 	return ret;
 }
@@ -9374,6 +9387,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 	enum fw_port_type port_type;
 	int mdio_addr;
 	fw_port_cap32_t pcaps, acaps;
+	u8 vivld = 0, vin = 0;
 	int ret;
 
 	/* If we haven't yet determined whether we're talking to Firmware
@@ -9428,7 +9442,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 		acaps = be32_to_cpu(cmd.u.info32.acaps32);
 	}
 
-	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
+			  &vivld, &vin);
 	if (ret < 0)
 		return ret;
 
@@ -9437,6 +9452,18 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 	pi->lport = port;
 	pi->rss_size = rss_size;
 
+	/* If fw supports returning the VIN as part of FW_VI_CMD,
+	 * save the returned values.
+	 */
+	if (adapter->params.viid_smt_extn_support) {
+		pi->vivld = vivld;
+		pi->vin = vin;
+	} else {
+		/* Retrieve the values from VIID */
+		pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+		pi->vin = FW_VIID_VIN_G(pi->viid);
+	}
+
 	pi->port_type = port_type;
 	pi->mdio_addr = mdio_addr;
 	pi->mod_type = FW_PORT_MOD_TYPE_NA;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60df66f..bf7325f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+	CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57584ab..1d9b3e1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1253,6 +1253,7 @@ enum fw_params_param_dev {
 	FW_PARAMS_PARAM_DEV_HMA_SIZE	= 0x20,
 	FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
 	FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR    = 0x24,
+	FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
 };
 
 /*
@@ -2109,6 +2110,19 @@ struct fw_vi_cmd {
 #define FW_VI_CMD_FREE_V(x)	((x) << FW_VI_CMD_FREE_S)
 #define FW_VI_CMD_FREE_F	FW_VI_CMD_FREE_V(1U)
 
+#define FW_VI_CMD_VFVLD_S	24
+#define FW_VI_CMD_VFVLD_M	0x1
+#define FW_VI_CMD_VFVLD_V(x)	((x) << FW_VI_CMD_VFVLD_S)
+#define FW_VI_CMD_VFVLD_G(x)	\
+	(((x) >> FW_VI_CMD_VFVLD_S) & FW_VI_CMD_VFVLD_M)
+#define FW_VI_CMD_VFVLD_F	FW_VI_CMD_VFVLD_V(1U)
+
+#define FW_VI_CMD_VIN_S		16
+#define FW_VI_CMD_VIN_M		0xff
+#define FW_VI_CMD_VIN_V(x)	((x) << FW_VI_CMD_VIN_S)
+#define FW_VI_CMD_VIN_G(x)	\
+	(((x) >> FW_VI_CMD_VIN_S) & FW_VI_CMD_VIN_M)
+
 #define FW_VI_CMD_VIID_S	0
 #define FW_VI_CMD_VIID_M	0xfff
 #define FW_VI_CMD_VIID_V(x)	((x) << FW_VI_CMD_VIID_S)
@@ -2182,6 +2196,12 @@ struct fw_vi_mac_cmd {
 	} u;
 };
 
+#define FW_VI_MAC_CMD_SMTID_S		12
+#define FW_VI_MAC_CMD_SMTID_M		0xff
+#define FW_VI_MAC_CMD_SMTID_V(x)	((x) << FW_VI_MAC_CMD_SMTID_S)
+#define FW_VI_MAC_CMD_SMTID_G(x)	\
+	(((x) >> FW_VI_MAC_CMD_SMTID_S) & FW_VI_MAC_CMD_SMTID_M)
+
 #define FW_VI_MAC_CMD_VIID_S	0
 #define FW_VI_MAC_CMD_VIID_V(x)	((x) << FW_VI_MAC_CMD_VIID_S)
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ff84791..8a2ad6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -722,6 +722,7 @@ static int adapter_up(struct adapter *adapter)
 
 		if (adapter->flags & USING_MSIX)
 			name_msix_vecs(adapter);
+
 		adapter->flags |= FULL_INIT_DONE;
 	}
 
@@ -747,8 +748,6 @@ static int adapter_up(struct adapter *adapter)
 	enable_rx(adapter);
 	t4vf_sge_start(adapter);
 
-	/* Initialize hash mac addr list*/
-	INIT_LIST_HEAD(&adapter->mac_hlist);
 	return 0;
 }
 
@@ -3036,6 +3035,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_unmap_bar;
 
+	/* Initialize hash mac addr list */
+	INIT_LIST_HEAD(&adapter->mac_hlist);
+
 	/*
 	 * Allocate our "adapter ports" and stitch everything together.
 	 */
@@ -3287,6 +3289,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
+	struct hash_mac_addr *entry, *tmp;
 
 	/*
 	 * Tear down driver state associated with device.
@@ -3337,6 +3340,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
 		if (!is_t4(adapter->params.chip))
 			iounmap(adapter->bar2);
 		kfree(adapter->mbox_log);
+		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+					 list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
 		kfree(adapter);
 	}
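Both cxgb4 and cxgb4vf now free the hash MAC address list in the PCI remove path instead of leaking it. The list_for_each_entry_safe() form is required because entries are deleted while walking the list; a stand-alone sketch of the idiom (the struct name mirrors the driver's hash_mac_addr, the rest is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct hash_mac_addr {
	struct list_head list;
	u8 addr[6];
};

static void free_mac_hlist(struct list_head *mac_hlist)
{
	struct hash_mac_addr *entry, *tmp;

	/* the _safe variant caches the next node so the current one can be freed */
	list_for_each_entry_safe(entry, tmp, mac_hlist, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}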
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c5ad7a4..245abf0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -796,7 +796,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
 	u16 vlan_tag;
 
 	vlan_tag = skb_vlan_tag_get(skb);
-	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+	vlan_prio = skb_vlan_tag_get_prio(skb);
 	/* If vlan priority provided by OS is NOT in available bmap */
 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 					     struct be_wrb_params
 					     *wrb_params)
 {
+	bool insert_vlan = false;
 	u16 vlan_tag = 0;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return skb;
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+		insert_vlan = true;
+	}
 
 	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
-		if (!vlan_tag)
+		if (!insert_vlan) {
 			vlan_tag = adapter->pvid;
+			insert_vlan = true;
+		}
 		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
 		 * skip VLAN insertion
 		 */
 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
 	}
 
-	if (vlan_tag) {
+	if (insert_vlan) {
 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
 						vlan_tag);
 		if (unlikely(!skb))
 			return skb;
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	}
 
 	/* Insert the outer VLAN, if any */
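The be_main change above is part of retiring VLAN_TAG_PRESENT: tag presence is now tracked explicitly, and the hardware-accelerated tag is cleared with __vlan_hwaccel_clear_tag() once it has been pushed into the payload. A small sketch of those helpers, assuming the skb may carry an accel tag on entry:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Move a hw-accel VLAN tag into the packet data, then drop the metadata. */
static struct sk_buff *push_vlan_into_payload(struct sk_buff *skb)
{
	u16 tci, prio;

	if (!skb_vlan_tag_present(skb))
		return skb;

	tci = skb_vlan_tag_get(skb);		/* full TCI */
	prio = skb_vlan_tag_get_prio(skb);	/* just the PCP bits */
	(void)prio;				/* e.g. for priority remapping */

	skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), tci);
	if (!skb)
		return NULL;

	__vlan_hwaccel_clear_tag(skb);		/* tag now lives in the payload */
	return skb;
}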
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6e0f47f..9510c9d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
 
 static int dpaa_phy_init(struct net_device *net_dev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct mac_device *mac_dev;
 	struct phy_device *phy_dev;
 	struct dpaa_priv *priv;
@@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev)
 	}
 
 	/* Remove any features not supported by the controller */
-	phy_dev->supported &= mac_dev->if_support;
+	ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+	linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
 	phy_support_asym_pause(phy_dev);
 
 	mac_dev->phy_dev = phy_dev;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 13d6e22..6249711 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -529,6 +529,75 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
 	return 0;
 }
 
+static int dpaa_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *c)
+{
+	struct qman_portal *portal;
+	u32 period;
+	u8 thresh;
+
+	portal = qman_get_affine_portal(smp_processor_id());
+	qman_portal_get_iperiod(portal, &period);
+	qman_dqrr_get_ithresh(portal, &thresh);
+
+	c->rx_coalesce_usecs = period;
+	c->rx_max_coalesced_frames = thresh;
+	c->use_adaptive_rx_coalesce = false;
+
+	return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *c)
+{
+	const cpumask_t *cpus = qman_affine_cpus();
+	bool needs_revert[NR_CPUS] = {false};
+	struct qman_portal *portal;
+	u32 period, prev_period;
+	u8 thresh, prev_thresh;
+	int cpu, res;
+
+	if (c->use_adaptive_rx_coalesce)
+		return -EINVAL;
+
+	period = c->rx_coalesce_usecs;
+	thresh = c->rx_max_coalesced_frames;
+
+	/* save previous values */
+	portal = qman_get_affine_portal(smp_processor_id());
+	qman_portal_get_iperiod(portal, &prev_period);
+	qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+	/* set new values */
+	for_each_cpu(cpu, cpus) {
+		portal = qman_get_affine_portal(cpu);
+		res = qman_portal_set_iperiod(portal, period);
+		if (res)
+			goto revert_values;
+		res = qman_dqrr_set_ithresh(portal, thresh);
+		if (res) {
+			qman_portal_set_iperiod(portal, prev_period);
+			goto revert_values;
+		}
+		needs_revert[cpu] = true;
+	}
+
+	return 0;
+
+revert_values:
+	/* restore previous values */
+	for_each_cpu(cpu, cpus) {
+		if (!needs_revert[cpu])
+			continue;
+		portal = qman_get_affine_portal(cpu);
+		/* restoring the previous values cannot fail; ignore return value */
+		qman_portal_set_iperiod(portal, prev_period);
+		qman_dqrr_set_ithresh(portal, prev_thresh);
+	}
+
+	return res;
+}
+
 const struct ethtool_ops dpaa_ethtool_ops = {
 	.get_drvinfo = dpaa_get_drvinfo,
 	.get_msglevel = dpaa_get_msglevel,
@@ -545,4 +614,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
 	.get_rxnfc = dpaa_get_rxnfc,
 	.set_rxnfc = dpaa_set_rxnfc,
 	.get_ts_info = dpaa_get_ts_info,
+	.get_coalesce = dpaa_get_coalesce,
+	.set_coalesce = dpaa_set_coalesce,
 };
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 88f7acc..640967a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -203,8 +203,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			 struct dpaa2_eth_channel *ch,
 			 const struct dpaa2_fd *fd,
-			 struct napi_struct *napi,
-			 u16 queue_id)
+			 struct dpaa2_eth_fq *fq)
 {
 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
 	u8 fd_format = dpaa2_fd_get_format(fd);
@@ -267,12 +266,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	}
 
 	skb->protocol = eth_type_trans(skb, priv->net_dev);
-	skb_record_rx_queue(skb, queue_id);
+	skb_record_rx_queue(skb, fq->flowid);
 
 	percpu_stats->rx_packets++;
 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 
-	napi_gro_receive(napi, skb);
+	napi_gro_receive(&ch->napi, skb);
 
 	return;
 
@@ -289,7 +288,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
  * Observance of NAPI budget is not our concern, leaving that to the caller.
  */
 static int consume_frames(struct dpaa2_eth_channel *ch,
-			  enum dpaa2_eth_fq_type *type)
+			  struct dpaa2_eth_fq **src)
 {
 	struct dpaa2_eth_priv *priv = ch->priv;
 	struct dpaa2_eth_fq *fq = NULL;
@@ -312,7 +311,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
 		fd = dpaa2_dq_fd(dq);
 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
 
-		fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+		fq->consume(priv, ch, fd, fq);
 		cleaned++;
 	} while (!is_last);
 
@@ -323,10 +322,10 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
 	ch->stats.frames += cleaned;
 
 	/* A dequeue operation only pulls frames from a single queue
-	 * into the store. Return the frame queue type as an out param.
+	 * into the store. Return the frame queue as an out param.
 	 */
-	if (type)
-		*type = fq->type;
+	if (src)
+		*src = fq;
 
 	return cleaned;
 }
@@ -571,8 +570,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	struct dpaa2_eth_fq *fq;
+	struct netdev_queue *nq;
 	u16 queue_mapping;
 	unsigned int needed_headroom;
+	u32 fd_len;
 	int err, i;
 
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -644,8 +645,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 		/* Clean up everything, including freeing the skb */
 		free_tx_fd(priv, &fd);
 	} else {
+		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
-		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+		percpu_stats->tx_bytes += fd_len;
+
+		nq = netdev_get_tx_queue(net_dev, queue_mapping);
+		netdev_tx_sent_queue(nq, fd_len);
 	}
 
 	return NETDEV_TX_OK;
@@ -661,11 +666,11 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 			      struct dpaa2_eth_channel *ch __always_unused,
 			      const struct dpaa2_fd *fd,
-			      struct napi_struct *napi __always_unused,
-			      u16 queue_id __always_unused)
+			      struct dpaa2_eth_fq *fq)
 {
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
+	u32 fd_len = dpaa2_fd_get_len(fd);
 	u32 fd_errors;
 
 	/* Tracing point */
@@ -673,7 +678,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 	percpu_extras->tx_conf_frames++;
-	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+	percpu_extras->tx_conf_bytes += fd_len;
+
+	fq->dq_frames++;
+	fq->dq_bytes += fd_len;
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -934,8 +942,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	struct dpaa2_eth_channel *ch;
 	struct dpaa2_eth_priv *priv;
 	int rx_cleaned = 0, txconf_cleaned = 0;
-	enum dpaa2_eth_fq_type type = 0;
-	int store_cleaned;
+	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+	struct netdev_queue *nq;
+	int store_cleaned, work_done;
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -949,18 +958,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 		/* Refill pool if appropriate */
 		refill_pool(priv, ch, priv->bpid);
 
-		store_cleaned = consume_frames(ch, &type);
-		if (type == DPAA2_RX_FQ)
+		store_cleaned = consume_frames(ch, &fq);
+		if (!store_cleaned)
+			break;
+		if (fq->type == DPAA2_RX_FQ) {
 			rx_cleaned += store_cleaned;
-		else
+		} else {
 			txconf_cleaned += store_cleaned;
+			/* We have a single Tx conf FQ on this channel */
+			txc_fq = fq;
+		}
 
 		/* If we either consumed the whole NAPI budget with Rx frames
 		 * or we reached the Tx confirmations threshold, we're done.
 		 */
 		if (rx_cleaned >= budget ||
-		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
-			return budget;
+		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+			work_done = budget;
+			goto out;
+		}
 	} while (store_cleaned);
 
 	/* We didn't consume the entire budget, so finish napi and
@@ -974,7 +990,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
 		  ch->nctx.desired_cpu);
 
-	return max(rx_cleaned, 1);
+	work_done = max(rx_cleaned, 1);
+
+out:
+	if (txc_fq) {
+		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+					  txc_fq->dq_bytes);
+		txc_fq->dq_frames = 0;
+		txc_fq->dq_bytes = 0;
+	}
+
+	return work_done;
 }
 
 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1434,8 +1461,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
 				     FSL_MC_POOL_DPCON, &dpcon);
 	if (err) {
-		dev_info(dev, "Not enough DPCONs, will go on as-is\n");
-		return NULL;
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+		return ERR_PTR(err);
 	}
 
 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
@@ -1493,8 +1523,10 @@ alloc_channel(struct dpaa2_eth_priv *priv)
 		return NULL;
 
 	channel->dpcon = setup_dpcon(priv);
-	if (!channel->dpcon)
+	if (IS_ERR_OR_NULL(channel->dpcon)) {
+		err = PTR_ERR(channel->dpcon);
 		goto err_setup;
+	}
 
 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
 				   &attr);
@@ -1513,7 +1545,7 @@ alloc_channel(struct dpaa2_eth_priv *priv)
 	free_dpcon(priv, channel->dpcon);
 err_setup:
 	kfree(channel);
-	return NULL;
+	return ERR_PTR(err);
 }
 
 static void free_channel(struct dpaa2_eth_priv *priv,
@@ -1547,10 +1579,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 	for_each_online_cpu(i) {
 		/* Try to allocate a channel */
 		channel = alloc_channel(priv);
-		if (!channel) {
-			dev_info(dev,
-				 "No affine channel for cpu %d and above\n", i);
-			err = -ENODEV;
+		if (IS_ERR_OR_NULL(channel)) {
+			err = PTR_ERR(channel);
+			if (err != -EPROBE_DEFER)
+				dev_info(dev,
+					 "No affine channel for cpu %d and above\n", i);
 			goto err_alloc_ch;
 		}
 
@@ -1597,7 +1630,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 		/* Stop if we already have enough channels to accommodate all
 		 * RX and TX conf queues
 		 */
-		if (priv->num_channels == dpaa2_eth_queue_count(priv))
+		if (priv->num_channels == priv->dpni_attrs.num_queues)
 			break;
 	}
 
@@ -1608,9 +1641,12 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 err_service_reg:
 	free_channel(priv, channel);
 err_alloc_ch:
+	if (err == -EPROBE_DEFER)
+		return err;
+
 	if (cpumask_empty(&priv->dpio_cpumask)) {
 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
-		return err;
+		return -ENODEV;
 	}
 
 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1732,7 +1768,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
 				     &dpbp_dev);
 	if (err) {
-		dev_err(dev, "DPBP device allocation failed\n");
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "DPBP device allocation failed\n");
 		return err;
 	}
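dpaa2-eth now reports Tx bytes to the qdisc layer (BQL): netdev_tx_sent_queue() on the transmit path and netdev_tx_completed_queue() once confirmations arrive, keyed by the same queue index. A minimal sketch of the pairing, with a hypothetical hw_enqueue() and per-queue completion counters:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical hardware enqueue; returns 0 on success */
int hw_enqueue(struct sk_buff *skb, u16 queue);

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue = skb_get_queue_mapping(skb);
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);
	unsigned int len = skb->len;

	if (hw_enqueue(skb, queue))
		return NETDEV_TX_BUSY;

	netdev_tx_sent_queue(nq, len);	/* account bytes handed to HW */
	return NETDEV_TX_OK;
}

/* Called from the Tx-confirmation/NAPI path for one queue. */
static void my_tx_complete(struct net_device *dev, u16 queue,
			   unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	netdev_tx_completed_queue(nq, pkts, bytes);	/* may re-wake the queue */
}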
 
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 452a8e9..16545e9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -271,14 +271,15 @@ struct dpaa2_eth_fq {
 	u32 tx_qdbin;
 	u16 flowid;
 	int target_cpu;
+	u32 dq_frames;
+	u32 dq_bytes;
 	struct dpaa2_eth_channel *channel;
 	enum dpaa2_eth_fq_type type;
 
 	void (*consume)(struct dpaa2_eth_priv *priv,
 			struct dpaa2_eth_channel *ch,
 			const struct dpaa2_fd *fd,
-			struct napi_struct *napi,
-			u16 queue_id);
+			struct dpaa2_eth_fq *fq);
 	struct dpaa2_eth_fq_stats stats;
 };
 
@@ -434,9 +435,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 	       DPAA2_ETH_RX_HWA_SIZE;
 }
 
+/* We have exactly one {Rx, Tx conf} queue per channel */
 static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
 {
-	return priv->dpni_attrs.num_queues;
+	return priv->num_channels;
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 84b942b..9b150db 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
 
 	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
 	if (err) {
-		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
 		goto err_exit;
 	}
 
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d79e4e0..71f4205 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
 	 */
 
 	/* get local capabilities */
-	lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
+	lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
 
 	/* get link partner capabilities */
 	rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 82722d0..88a396f 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 
 	if (data->get_tbipa) {
 		for_each_child_of_node(np, tbi) {
-			if (strcmp(tbi->type, "tbi-phy") == 0) {
+			if (of_node_is_type(tbi, "tbi-phy")) {
 				dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
 					tbi);
 				break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3c8da1a..0e102c7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1784,14 +1784,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
  */
 static int init_phy(struct net_device *dev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct gfar_private *priv = netdev_priv(dev);
-	uint gigabit_support =
-		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-		GFAR_SUPPORTED_GBIT : 0;
 	phy_interface_t interface;
 	struct phy_device *phydev;
 	struct ethtool_eee edata;
 
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
 	priv->oldlink = 0;
 	priv->oldspeed = 0;
 	priv->oldduplex = -1;
@@ -1809,8 +1815,8 @@ static int init_phy(struct net_device *dev)
 		gfar_configure_serdes(dev);
 
 	/* Remove any features not supported by the controller */
-	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-	phydev->advertising = phydev->supported;
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	/* Add support for flow control */
 	phy_support_asym_pause(phydev);
@@ -3656,7 +3662,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 		if (phydev->asym_pause)
 			rmt_adv |= LPA_PAUSE_ASYM;
 
-		lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 		if (flowctrl & FLOW_CTRL_TX)
 			val |= MACCFG1_TX_FLOW;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0d76e15..241325c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
 		prio = vlan_tci_prio(rule);
 		prio_mask = vlan_tci_priom(rule);
 
-		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
-			vlan |= RQFPR_CFI;
-			vlan_mask |= RQFPR_CFI;
-		} else if (cfi != VLAN_TAG_PRESENT &&
-			   cfi_mask == VLAN_TAG_PRESENT) {
+		if (cfi_mask) {
+			if (cfi)
+				vlan |= RQFPR_CFI;
 			vlan_mask |= RQFPR_CFI;
 		}
 	}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e0270..2e978cb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev)
 	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
 		uec_configure_serdes(dev);
 
-	phy_set_max_speed(phydev, SPEED_100);
-
-	if (priv->max_speed == SPEED_1000)
-		phydev->supported |= ADVERTISED_1000baseT_Full;
-
-	phydev->advertising = phydev->supported;
+	phy_set_max_speed(phydev, priv->max_speed);
 
 	priv->phydev = phydev;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e9078..c62378c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev)
  */
 int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
 	struct phy_device *phy_dev = h->phy_dev;
 	int ret;
 
@@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (unlikely(ret))
 		return -ENODEV;
 
-	phy_dev->supported &= h->if_support;
-	phy_dev->advertising = phy_dev->supported;
+	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+	linkmode_copy(phy_dev->advertising, phy_dev->supported);
 
 	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
 		phy_dev->autoneg = false;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 002534f..d01bf53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -9,6 +9,6 @@
 obj-$(CONFIG_HNS3) += hnae3.o
 
 obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
 
 hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326c..4d9cf39 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,9 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_BIND_FUNC_QUEUE,	/* (VF -> PF) bind function and queue */
 	HCLGE_MBX_GET_LINK_STATUS,	/* (VF -> PF) get link status */
 	HCLGE_MBX_QUEUE_RESET,		/* (VF -> PF) reset queue */
+	HCLGE_MBX_KEEP_ALIVE,		/* (VF -> PF) send keep alive cmd */
+	HCLGE_MBX_SET_ALIVE,		/* (VF -> PF) set alive state */
+	HCLGE_MBX_SET_MTU,		/* (VF -> PF) set mtu */
 };
 
 /* below are per-VF mac-vlan subcodes */
@@ -85,6 +88,12 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u16 msg[8];
 };
 
+struct hclge_vf_rst_cmd {
+	u8 dest_vfid;
+	u8 vf_rst;
+	u8 rsv[22];
+};
+
 /* used by VF to store the received Async responses from PF */
 struct hclgevf_mbx_arq_ring {
 #define HCLGE_MBX_MAX_ARQ_MSG_SIZE	8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b406..a1707b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
 #define HNAE3_UNIC_CLIENT_INITED_B		0x4
 #define HNAE3_ROCE_CLIENT_INITED_B		0x5
 #define HNAE3_DEV_SUPPORT_FD_B			0x6
+#define HNAE3_DEV_SUPPORT_GRO_B			0x7
 
 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
 		BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
 #define hnae3_dev_fd_supported(hdev) \
 	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
 
+#define hnae3_dev_gro_supported(hdev) \
+	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
 #define ring_ptr_move_fw(ring, p) \
 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
 #define ring_ptr_move_bw(ring, p) \
@@ -124,7 +128,10 @@ enum hnae3_reset_notify_type {
 
 enum hnae3_reset_type {
 	HNAE3_VF_RESET,
+	HNAE3_VF_FUNC_RESET,
+	HNAE3_VF_PF_FUNC_RESET,
 	HNAE3_VF_FULL_RESET,
+	HNAE3_FLR_RESET,
 	HNAE3_FUNC_RESET,
 	HNAE3_CORE_RESET,
 	HNAE3_GLOBAL_RESET,
@@ -132,6 +139,11 @@ enum hnae3_reset_type {
 	HNAE3_NONE_RESET,
 };
 
+enum hnae3_flr_state {
+	HNAE3_FLR_DOWN,
+	HNAE3_FLR_DONE,
+};
+
 struct hnae3_vector_info {
 	u8 __iomem *io_addr;
 	int vector;
@@ -162,6 +174,7 @@ struct hnae3_client_ops {
 	int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
 	int (*reset_notify)(struct hnae3_handle *handle,
 			    enum hnae3_reset_notify_type type);
+	enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
 };
 
 #define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +210,10 @@ struct hnae3_ae_dev {
  *   Enable the hardware
  * stop()
  *   Disable the hardware
+ * client_start()
+ *   Inform the hclge that the client has been started
+ * client_stop()
+ *   Inform the hclge that the client has been stopped
  * get_status()
  *   Get the carrier state of the back channel of the handle, 1 for ok, 0 for
  *   non-ok
@@ -292,17 +309,22 @@ struct hnae3_ae_dev {
  *   Set vlan filter config of vf
  * enable_hw_strip_rxvtag()
  *   Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en()
+ *   Enable/disable HW GRO
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
 	void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+	void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+	void (*flr_done)(struct hnae3_ae_dev *ae_dev);
 	int (*init_client_instance)(struct hnae3_client *client,
 				    struct hnae3_ae_dev *ae_dev);
 	void (*uninit_client_instance)(struct hnae3_client *client,
 				       struct hnae3_ae_dev *ae_dev);
 	int (*start)(struct hnae3_handle *handle);
 	void (*stop)(struct hnae3_handle *handle);
+	int (*client_start)(struct hnae3_handle *handle);
+	void (*client_stop)(struct hnae3_handle *handle);
 	int (*get_status)(struct hnae3_handle *handle);
 	void (*get_ksettings_an_result)(struct hnae3_handle *handle,
 					u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +425,8 @@ struct hnae3_ae_ops {
 				  u16 vlan, u8 qos, __be16 proto);
 	int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
 	void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+	void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+					  enum hnae3_reset_type rst_type);
 	void (*get_channels)(struct hnae3_handle *handle,
 			     struct ethtool_channels *ch);
 	void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -429,7 +453,12 @@ struct hnae3_ae_ops {
 				struct ethtool_rxnfc *cmd, u32 *rule_locs);
 	int (*restore_fd_rules)(struct hnae3_handle *handle);
 	void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+	int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
 	pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+	bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+	bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+	unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+	int (*set_gro_en)(struct hnae3_handle *handle, int enable);
 };
 
 struct hnae3_dcb_ops {
@@ -488,6 +517,14 @@ struct hnae3_roce_private_info {
 	void __iomem *roce_io_base;
 	int base_vector;
 	int num_vectors;
+
+	/* The attributes below are defined for the RoCE client. hnae3
+	 * gives them initial values, and the RoCE client can modify and
+	 * use them.
+	 */
+	unsigned long reset_state;
+	unsigned long instance_state;
+	unsigned long state;
 };
 
 struct hnae3_unic_private_info {
@@ -520,9 +557,6 @@ struct hnae3_handle {
 	struct hnae3_ae_algo *ae_algo;  /* the class who provides this handle */
 	u64 flags; /* Indicate the capabilities for this handle*/
 
-	unsigned long last_reset_time;
-	enum hnae3_reset_type reset_level;
-
 	union {
 		struct net_device *netdev; /* first member */
 		struct hnae3_knic_private_info kinfo;
@@ -533,6 +567,7 @@ struct hnae3_handle {
 	u32 numa_node_mask;	/* for multi-chip support */
 
 	u8 netdev_flags;
+	struct dentry *hnae3_dbgfs;
 };
 
 #define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a8..b6fabbb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_getets)
 		return h->kinfo.dcb_ops->ieee_getets(h, ets);
 
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_setets)
 		return h->kinfo.dcb_ops->ieee_setets(h, ets);
 
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_getpfc)
 		return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
 
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (h->kinfo.dcb_ops->ieee_setpfc)
 		return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 0000000..86d667a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+#define HNS3_DBG_READ_LEN 256
+
+static struct dentry *hns3_dbgfs_root;
+
+static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+{
+	struct hns3_nic_priv *priv = h->priv;
+	struct hns3_nic_ring_data *ring_data;
+	struct hns3_enet_ring *ring;
+	u32 base_add_l, base_add_h;
+	u32 queue_num, queue_max;
+	u32 value, i = 0;
+	int cnt;
+
+	if (!priv->ring_data) {
+		dev_err(&h->pdev->dev, "ring_data is NULL\n");
+		return -EFAULT;
+	}
+
+	queue_max = h->kinfo.num_tqps;
+	cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
+	if (cnt)
+		queue_num = 0;
+	else
+		queue_max = queue_num + 1;
+
+	dev_info(&h->pdev->dev, "queue info\n");
+
+	if (queue_num >= h->kinfo.num_tqps) {
+		dev_err(&h->pdev->dev,
+			"Queue number(%u) is out of range(%u)\n", queue_num,
+			h->kinfo.num_tqps - 1);
+		return -EINVAL;
+	}
+
+	ring_data = priv->ring_data;
+	for (i = queue_num; i < queue_max; i++) {
+		/* Each iteration must check whether the instance is being
+		 * reset, to prevent referencing invalid memory. The code
+		 * below also needs to finish within 100ms.
+		 */
+		if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+			return -EPERM;
+
+		ring = ring_data[i + h->kinfo.num_tqps].ring;
+		base_add_h = readl_relaxed(ring->tqp->io_base +
+					   HNS3_RING_RX_RING_BASEADDR_H_REG);
+		base_add_l = readl_relaxed(ring->tqp->io_base +
+					   HNS3_RING_RX_RING_BASEADDR_L_REG);
+		dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+			 base_add_h, base_add_l);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_BD_NUM_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_BD_LEN_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_TAIL_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_HEAD_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_FBDNUM_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
+		dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+
+		ring = ring_data[i].ring;
+		base_add_h = readl_relaxed(ring->tqp->io_base +
+					   HNS3_RING_TX_RING_BASEADDR_H_REG);
+		base_add_l = readl_relaxed(ring->tqp->io_base +
+					   HNS3_RING_TX_RING_BASEADDR_L_REG);
+		dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+			 base_add_h, base_add_l);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_BD_NUM_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_TC_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_TAIL_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_HEAD_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_FBDNUM_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_OFFSET_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+
+		value = readl_relaxed(ring->tqp->io_base +
+				      HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
+		dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+			 value);
+	}
+
+	return 0;
+}
+
+static void hns3_dbg_help(struct hnae3_handle *h)
+{
+	dev_info(&h->pdev->dev, "available commands\n");
+	dev_info(&h->pdev->dev, "queue info [number]\n");
+	dev_info(&h->pdev->dev, "dump fd tcam\n");
+	dev_info(&h->pdev->dev, "dump tc\n");
+	dev_info(&h->pdev->dev, "dump tm\n");
+	dev_info(&h->pdev->dev, "dump qos pause cfg\n");
+	dev_info(&h->pdev->dev, "dump qos pri map\n");
+	dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+}
+
+static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+				 size_t count, loff_t *ppos)
+{
+	int uncopy_bytes;
+	char *buf;
+	int len;
+
+	if (*ppos != 0)
+		return 0;
+
+	if (count < HNS3_DBG_READ_LEN)
+		return -ENOSPC;
+
+	buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+		       "Please echo help to cmd to get help information");
+	uncopy_bytes = copy_to_user(buffer, buf, len);
+
+	kfree(buf);
+
+	if (uncopy_bytes)
+		return -EFAULT;
+
+	return (*ppos = len);
+}
+
+static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
+				  size_t count, loff_t *ppos)
+{
+	struct hnae3_handle *handle = filp->private_data;
+	struct hns3_nic_priv *priv  = handle->priv;
+	char *cmd_buf, *cmd_buf_tmp;
+	int uncopied_bytes;
+	int ret = 0;
+
+	if (*ppos != 0)
+		return 0;
+
+	/* Bail out if the instance is not initialized or is being reset. */
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return 0;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return count;
+
+	uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+	if (uncopied_bytes) {
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+
+	cmd_buf[count] = '\0';
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
+	if (strncmp(cmd_buf, "help", 4) == 0)
+		hns3_dbg_help(handle);
+	else if (strncmp(cmd_buf, "queue info", 10) == 0)
+		ret = hns3_dbg_queue_info(handle, cmd_buf);
+	else if (handle->ae_algo->ops->dbg_run_cmd)
+		ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+
+	if (ret)
+		hns3_dbg_help(handle);
+
+	kfree(cmd_buf);
+	cmd_buf = NULL;
+
+	return count;
+}
+
+static const struct file_operations hns3_dbg_cmd_fops = {
+	.owner = THIS_MODULE,
+	.open  = simple_open,
+	.read  = hns3_dbg_cmd_read,
+	.write = hns3_dbg_cmd_write,
+};
+
+void hns3_dbg_init(struct hnae3_handle *handle)
+{
+	const char *name = pci_name(handle->pdev);
+	struct dentry *pfile;
+
+	handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+	if (!handle->hnae3_dbgfs)
+		return;
+
+	pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+				    &hns3_dbg_cmd_fops);
+	if (!pfile) {
+		debugfs_remove_recursive(handle->hnae3_dbgfs);
+		handle->hnae3_dbgfs = NULL;
+		dev_warn(&handle->pdev->dev, "create file for %s fail\n",
+			 name);
+	}
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+	debugfs_remove_recursive(handle->hnae3_dbgfs);
+	handle->hnae3_dbgfs = NULL;
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+	hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+	if (!hns3_dbgfs_root) {
+		pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
+		return;
+	}
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+	debugfs_remove_recursive(hns3_dbgfs_root);
+	hns3_dbgfs_root = NULL;
+}
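For context: the write handler above forwards any string it does not recognise to the ae layer
through ops->dbg_run_cmd(). Below is a minimal, illustrative sketch (not the in-tree
implementation) of how the hclge side might dispatch the commands listed in hns3_dbg_help();
hclge_get_vport() and vport->back are the usual handle-to-hdev accessors assumed here, and the
dump helpers are the ones added in hclge_debugfs.c further down in this patch:

static int hclge_dbg_run_cmd_sketch(struct hnae3_handle *handle, char *cmd_buf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* match against the strings advertised by hns3_dbg_help() */
	if (strncmp(cmd_buf, "dump tc", 7) == 0)
		hclge_dbg_dump_tc(hdev);
	else if (strncmp(cmd_buf, "dump tm", 7) == 0)
		hclge_dbg_dump_tm(hdev);
	else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0)
		hclge_dbg_dump_qos_pause_cfg(hdev);
	else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0)
		hclge_dbg_dump_qos_pri_map(hdev);
	else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0)
		hclge_dbg_dump_qos_buf_cfg(hdev);
	else
		return -EOPNOTSUPP;

	return 0;
}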
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d..d1b2de2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
 #include <linux/vermagic.h>
 #include <net/gre.h>
 #include <net/pkt_cls.h>
+#include <net/tcp.h>
 #include <net/vxlan.h>
 
 #include "hnae3.h"
@@ -312,6 +313,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 	return min_t(u16, rss_size, max_rss_size);
 }
 
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+	u32 rcb_reg;
+
+	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+	rcb_reg |= BIT(HNS3_RING_EN_B);
+	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+	u32 rcb_reg;
+
+	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+	rcb_reg &= ~BIT(HNS3_RING_EN_B);
+	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
 static int hns3_nic_net_up(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +353,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	for (i = 0; i < priv->vector_num; i++)
 		hns3_vector_enable(&priv->tqp_vector[i]);
 
+	/* enable rcb */
+	for (j = 0; j < h->kinfo.num_tqps; j++)
+		hns3_tqp_enable(h->kinfo.tqp[j]);
+
 	/* start the ae_dev */
 	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 	if (ret)
@@ -344,6 +367,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	return 0;
 
 out_start_err:
+	while (j--)
+		hns3_tqp_disable(h->kinfo.tqp[j]);
+
 	for (j = i - 1; j >= 0; j--)
 		hns3_vector_disable(&priv->tqp_vector[j]);
 
@@ -354,11 +380,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
 
 static int hns3_nic_net_open(struct net_device *netdev)
 {
-	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 	struct hnae3_knic_private_info *kinfo;
 	int i, ret;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	netif_carrier_off(netdev);
 
 	ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,24 @@ static int hns3_nic_net_open(struct net_device *netdev)
 				       kinfo->prio_tc[i]);
 	}
 
-	priv->ae_handle->last_reset_time = jiffies;
 	return 0;
 }
 
 static void hns3_nic_net_down(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
 	const struct hnae3_ae_ops *ops;
 	int i;
 
-	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
-		return;
-
 	/* disable vectors */
 	for (i = 0; i < priv->vector_num; i++)
 		hns3_vector_disable(&priv->tqp_vector[i]);
 
+	/* disable rcb */
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_tqp_disable(h->kinfo.tqp[i]);
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
@@ -408,6 +437,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
 
 static int hns3_nic_net_stop(struct net_device *netdev)
 {
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+		return 0;
+
 	netif_tx_stop_all_queues(netdev);
 	netif_carrier_off(netdev);
 
@@ -1312,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
 	}
 
+	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+		if (features & NETIF_F_GRO_HW)
+			ret = h->ae_algo->ops->set_gro_en(h, true);
+		else
+			ret = h->ae_algo->ops->set_gro_en(h, false);
+		if (ret)
+			return ret;
+	}
+
 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
 	    h->ae_algo->ops->enable_vlan_filter) {
 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
-	bool if_running = netif_running(netdev);
 	int ret;
 
 	if (!h->ae_algo->ops->set_mtu)
 		return -EOPNOTSUPP;
 
-	/* if this was called with netdev up then bring netdevice down */
-	if (if_running) {
-		(void)hns3_nic_net_stop(netdev);
-		msleep(100);
-	}
-
 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
 	if (ret)
 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 	else
 		netdev->mtu = new_mtu;
 
-	/* if the netdev was running earlier, bring it up again */
-	if (if_running && hns3_nic_net_open(netdev))
-		ret = -EINVAL;
-
 	return ret;
 }
 
@@ -1615,10 +1647,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
 
 	priv->tx_timeout_count++;
 
-	if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
-		return;
-
-	/* request the reset */
+	/* request the reset, and let the hclge layer determine
+	 * which reset level should be performed
+	 */
 	if (h->ae_algo->ops->reset_event)
 		h->ae_algo->ops->reset_event(h->pdev, h);
 }
@@ -1682,8 +1713,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
 static void hns3_get_dev_capability(struct pci_dev *pdev,
 				    struct hnae3_ae_dev *ae_dev)
 {
-	if (pdev->revision >= 0x21)
+	if (pdev->revision >= 0x21) {
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+	}
 }
 
 /* hns3_probe - Device initialization routine
@@ -1819,9 +1852,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
 	return PCI_ERS_RESULT_DISCONNECT;
 }
 
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "hns3 flr prepare\n");
+	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+		ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "hns3 flr done\n");
+	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+		ae_dev->ops->flr_done(ae_dev);
+}
+
 static const struct pci_error_handlers hns3_err_handler = {
 	.error_detected = hns3_error_detected,
 	.slot_reset     = hns3_slot_reset,
+	.reset_prepare	= hns3_reset_prepare,
+	.reset_done	= hns3_reset_done,
 };
 
 static struct pci_driver hns3_driver = {
@@ -1875,7 +1928,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
 	if (pdev->revision >= 0x21) {
-		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			NETIF_F_GRO_HW;
+		netdev->features |= NETIF_F_GRO_HW;
 
 		if (!(h->flags & HNAE3_SUPPORT_VF)) {
 			netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2308,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 	if (!(netdev->features & NETIF_F_RXCSUM))
 		return;
 
+	/* We MUST enable hardware checksum before enabling hardware GRO */
+	if (skb_shinfo(skb)->gso_size) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return;
+	}
+
 	/* check if hardware has done checksum */
 	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
 		return;
@@ -2296,6 +2357,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 
 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
 {
+	if (skb_has_frag_list(skb))
+		napi_gro_flush(&ring->tqp_vector->napi, false);
+
 	napi_gro_receive(&ring->tqp_vector->napi, skb);
 }
 
@@ -2329,6 +2393,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 	}
 }
 
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+			  unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG	1
+	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct sk_buff *skb;
+
+	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+	skb = ring->skb;
+	if (unlikely(!skb)) {
+		netdev_err(netdev, "alloc rx skb fail\n");
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.sw_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
+
+		return -ENOMEM;
+	}
+
+	prefetchw(skb->data);
+
+	ring->pending_buf = 1;
+	ring->frag_num = 0;
+	ring->tail_skb = NULL;
+	if (length <= HNS3_RX_HEAD_SIZE) {
+		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+		/* We can reuse buffer as-is, just make sure it is local */
+		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+			desc_cb->reuse_flag = 1;
+		else /* This page cannot be reused so discard it */
+			put_page(desc_cb->priv);
+
+		ring_ptr_move_fw(ring, next_to_clean);
+		return 0;
+	}
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.seg_pkt_cnt++;
+	u64_stats_update_end(&ring->syncp);
+
+	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+	__skb_put(skb, ring->pull_len);
+	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+			    desc_cb);
+	ring_ptr_move_fw(ring, next_to_clean);
+
+	return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+			 struct sk_buff **out_skb, bool pending)
+{
+	struct sk_buff *skb = *out_skb;
+	struct sk_buff *head_skb = *out_skb;
+	struct sk_buff *new_skb;
+	struct hns3_desc_cb *desc_cb;
+	struct hns3_desc *pre_desc;
+	u32 bd_base_info;
+	int pre_bd;
+
+	/* if there is a pending BD, the SW param next_to_clean has
+	 * already moved to the next BD, which may not be valid yet
+	 */
+	if (pending) {
+		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+			ring->desc_num;
+		pre_desc = &ring->desc[pre_bd];
+		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+	} else {
+		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+	}
+
+	while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+		desc = &ring->desc[ring->next_to_clean];
+		desc_cb = &ring->desc_cb[ring->next_to_clean];
+		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+		if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+			return -ENXIO;
+
+		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+						 HNS3_RX_HEAD_SIZE);
+			if (unlikely(!new_skb)) {
+				netdev_err(ring->tqp->handle->kinfo.netdev,
+					   "alloc rx skb frag fail\n");
+				return -ENXIO;
+			}
+			ring->frag_num = 0;
+
+			if (ring->tail_skb) {
+				ring->tail_skb->next = new_skb;
+				ring->tail_skb = new_skb;
+			} else {
+				skb_shinfo(skb)->frag_list = new_skb;
+				ring->tail_skb = new_skb;
+			}
+		}
+
+		if (ring->tail_skb) {
+			head_skb->truesize += hnae3_buf_size(ring);
+			head_skb->data_len += le16_to_cpu(desc->rx.size);
+			head_skb->len += le16_to_cpu(desc->rx.size);
+			skb = ring->tail_skb;
+		}
+
+		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+		ring_ptr_move_fw(ring, next_to_clean);
+		ring->pending_buf++;
+	}
+
+	return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+			       u32 bd_base_info)
+{
+	u16 gro_count;
+	u32 l3_type;
+
+	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+				    HNS3_RXD_GRO_COUNT_S);
+	/* if there is no HW GRO, do not set gro params */
+	if (!gro_count)
+		return;
+
+	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+	 * to skb_shinfo(skb)->gso_segs
+	 */
+	NAPI_GRO_CB(skb)->count = gro_count;
+
+	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+				  HNS3_RXD_L3ID_S);
+	if (l3_type == HNS3_L3_TYPE_IPV4)
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	else if (l3_type == HNS3_L3_TYPE_IPV6)
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+	else
+		return;
+
+	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+						    HNS3_RXD_GRO_SIZE_M,
+						    HNS3_RXD_GRO_SIZE_S);
+	if (skb_shinfo(skb)->gso_size)
+		tcp_gro_complete(skb);
+}
+
 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
 				     struct sk_buff *skb)
 {
@@ -2345,18 +2556,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
 }
 
 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
-			     struct sk_buff **out_skb, int *out_bnum)
+			     struct sk_buff **out_skb)
 {
 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct sk_buff *skb = ring->skb;
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc *desc;
-	struct sk_buff *skb;
-	unsigned char *va;
 	u32 bd_base_info;
-	int pull_len;
 	u32 l234info;
 	int length;
-	int bnum;
+	int ret;
 
 	desc = &ring->desc[ring->next_to_clean];
 	desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2577,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	/* Check valid BD */
 	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
-		return -EFAULT;
+		return -ENXIO;
 
-	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+	if (!skb)
+		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
 
 	/* Prefetch first cache line of first page
 	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2589,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	 * lines. In such a case, single fetch would suffice to cache in the
 	 * relevant part of the header.
 	 */
-	prefetch(va);
+	prefetch(ring->va);
 #if L1_CACHE_BYTES < 128
-	prefetch(va + L1_CACHE_BYTES);
+	prefetch(ring->va + L1_CACHE_BYTES);
 #endif
 
-	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
-					HNS3_RX_HEAD_SIZE);
-	if (unlikely(!skb)) {
-		netdev_err(netdev, "alloc rx skb fail\n");
+	if (!skb) {
+		ret = hns3_alloc_skb(ring, length, ring->va);
+		*out_skb = skb = ring->skb;
 
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		if (ret < 0) /* alloc buffer fail */
+			return ret;
+		if (ret > 0) { /* need add frag */
+			ret = hns3_add_frag(ring, desc, &skb, false);
+			if (ret)
+				return ret;
 
-		return -ENOMEM;
-	}
-
-	prefetchw(skb->data);
-
-	bnum = 1;
-	if (length <= HNS3_RX_HEAD_SIZE) {
-		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
-
-		/* We can reuse buffer as-is, just make sure it is local */
-		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
-			desc_cb->reuse_flag = 1;
-		else /* This page cannot be reused so discard it */
-			put_page(desc_cb->priv);
-
-		ring_ptr_move_fw(ring, next_to_clean);
-	} else {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.seg_pkt_cnt++;
-		u64_stats_update_end(&ring->syncp);
-
-		pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
-		memcpy(__skb_put(skb, pull_len), va,
-		       ALIGN(pull_len, sizeof(long)));
-
-		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
-		ring_ptr_move_fw(ring, next_to_clean);
-
-		while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
-			desc = &ring->desc[ring->next_to_clean];
-			desc_cb = &ring->desc_cb[ring->next_to_clean];
-			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
-			ring_ptr_move_fw(ring, next_to_clean);
-			bnum++;
+			/* As the head data may be changed when GRO is enabled,
+			 * copy it in after the rest of the packet is received
+			 */
+			memcpy(skb->data, ring->va,
+			       ALIGN(ring->pull_len, sizeof(long)));
 		}
-	}
+	} else {
+		ret = hns3_add_frag(ring, desc, &skb, true);
+		if (ret)
+			return ret;
 
-	*out_bnum = bnum;
+		/* As the head data may be changed when GRO is enabled, copy
+		 * the head data in after the rest of the packet is received
+		 */
+		memcpy(skb->data, ring->va,
+		       ALIGN(ring->pull_len, sizeof(long)));
+	}
 
 	l234info = le32_to_cpu(desc->rx.l234_info);
+	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
 
 	/* Based on hw strategy, the tag offloaded will be stored at
 	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2674,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	ring->tqp_vector->rx_group.total_bytes += skb->len;
 
+	/* This is needed in order to enable forwarding support */
+	hns3_set_gro_param(skb, l234info, bd_base_info);
+
 	hns3_rx_checksum(ring, skb, desc);
+	*out_skb = skb;
 	hns3_set_rx_skb_rss_type(ring, skb);
 
 	return 0;
@@ -2497,9 +2691,9 @@ int hns3_clean_rx_ring(
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
 	int recv_pkts, recv_bds, clean_count, err;
-	int unused_count = hns3_desc_unused(ring);
-	struct sk_buff *skb = NULL;
-	int num, bnum = 0;
+	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+	struct sk_buff *skb = ring->skb;
+	int num;
 
 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
 	rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2707,32 @@ int hns3_clean_rx_ring(
 			hns3_nic_alloc_rx_buffers(ring,
 						  clean_count + unused_count);
 			clean_count = 0;
-			unused_count = hns3_desc_unused(ring);
+			unused_count = hns3_desc_unused(ring) -
+					ring->pending_buf;
 		}
 
 		/* Poll one pkt */
-		err = hns3_handle_rx_bd(ring, &skb, &bnum);
+		err = hns3_handle_rx_bd(ring, &skb);
 		if (unlikely(!skb)) /* This fault cannot be repaired */
 			goto out;
 
-		recv_bds += bnum;
-		clean_count += bnum;
-		if (unlikely(err)) {  /* Do jump the err */
-			recv_pkts++;
+		if (err == -ENXIO) { /* no FE bit for this packet yet */
+			goto out;
+		} else if (unlikely(err)) {  /* skip the erroneous packet */
+			recv_bds += ring->pending_buf;
+			clean_count += ring->pending_buf;
+			ring->skb = NULL;
+			ring->pending_buf = 0;
 			continue;
 		}
 
 		/* Do update ip stack process */
 		skb->protocol = eth_type_trans(skb, netdev);
 		rx_fn(ring, skb);
+		recv_bds += ring->pending_buf;
+		clean_count += ring->pending_buf;
+		ring->skb = NULL;
+		ring->pending_buf = 0;
 
 		recv_pkts++;
 	}
@@ -2669,6 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 
 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
 {
+	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
 	struct hns3_enet_ring *ring;
 	int rx_pkt_total = 0;
 
@@ -2677,6 +2880,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
 	bool clean_complete = true;
 	int rx_budget;
 
+	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+		napi_complete(napi);
+		return 0;
+	}
+
 	/* Since the actual Tx work is minimal, we can give the Tx a larger
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
@@ -2701,9 +2909,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	napi_complete(napi);
-	hns3_update_new_int_gl(tqp_vector);
-	hns3_mask_vector_irq(tqp_vector, 1);
+	if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
+	    napi_complete(napi)) {
+		hns3_update_new_int_gl(tqp_vector);
+		hns3_mask_vector_irq(tqp_vector, 1);
+	}
 
 	return rx_pkt_total;
 }
@@ -3319,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
 }
 
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+	if (!handle->ae_algo->ops->client_start)
+		return 0;
+
+	return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+	if (!handle->ae_algo->ops->client_stop)
+		return;
+
+	handle->ae_algo->ops->client_stop(handle);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
 	struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3563,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	priv->dev = &pdev->dev;
 	priv->netdev = netdev;
 	priv->ae_handle = handle;
-	priv->ae_handle->last_reset_time = jiffies;
 	priv->tx_timeout_count = 0;
 
 	handle->kinfo.netdev = netdev;
@@ -3357,11 +3582,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	if (handle->flags & HNAE3_SUPPORT_VF)
-		handle->reset_level = HNAE3_VF_RESET;
-	else
-		handle->reset_level = HNAE3_FUNC_RESET;
-
 	ret = hns3_get_ring_config(priv);
 	if (ret) {
 		ret = -ENOMEM;
@@ -3392,10 +3612,20 @@ static int hns3_client_init(struct hnae3_handle *handle)
 		goto out_reg_netdev_fail;
 	}
 
+	ret = hns3_client_start(handle);
+	if (ret) {
+		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+			goto out_reg_netdev_fail;
+	}
+
 	hns3_dcbnl_setup(handle);
 
-	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
-	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+	hns3_dbg_init(handle);
+
+	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+	netdev->max_mtu = HNS3_MAX_MTU;
+
+	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
 	return ret;
 
@@ -3418,11 +3648,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
+	hns3_client_stop(handle);
+
 	hns3_remove_hw_addr(netdev);
 
 	if (netdev->reg_state != NETREG_UNINITIALIZED)
 		unregister_netdev(netdev);
 
+	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+		netdev_warn(netdev, "already uninitialized\n");
+		goto out_netdev_free;
+	}
+
 	hns3_del_all_fd_rules(netdev, true);
 
 	hns3_force_clear_all_rx_ring(handle);
@@ -3441,8 +3678,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
 	hns3_put_ring_config(priv);
 
+	hns3_dbg_uninit(handle);
+
 	priv->ring_data = NULL;
 
+out_netdev_free:
 	free_netdev(netdev);
 }
 
@@ -3708,8 +3948,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
 
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct net_device *ndev = kinfo->netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return 0;
+
+	/* it is cumbersome for hardware to pick-and-choose entries for deletion
+	 * from table space. Hence, for a function reset, software intervention
+	 * is required to delete the entries
+	 */
+	if (hns3_dev_ongoing_func_reset(ae_dev)) {
+		hns3_remove_hw_addr(ndev);
+		hns3_del_all_fd_rules(ndev, false);
+	}
 
 	if (!netif_running(ndev))
 		return 0;
@@ -3720,6 +3974,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
 	int ret = 0;
 
 	if (netif_running(kinfo->netdev)) {
@@ -3729,9 +3984,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 				   "hns net up fail, ret=%d!\n", ret);
 			return ret;
 		}
-		handle->last_reset_time = jiffies;
 	}
 
+	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
 	return ret;
 }
 
@@ -3771,28 +4027,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
+	ret = hns3_nic_alloc_vector_data(priv);
+	if (ret)
+		return ret;
+
 	hns3_restore_coal(priv);
 
 	ret = hns3_nic_init_vector_data(priv);
 	if (ret)
-		return ret;
+		goto err_dealloc_vector;
 
 	ret = hns3_init_all_ring(priv);
-	if (ret) {
-		hns3_nic_uninit_vector_data(priv);
-		priv->ring_data = NULL;
-	}
+	if (ret)
+		goto err_uninit_vector;
+
+	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+	return ret;
+
+err_uninit_vector:
+	hns3_nic_uninit_vector_data(priv);
+	priv->ring_data = NULL;
+err_dealloc_vector:
+	hns3_nic_dealloc_vector_data(priv);
 
 	return ret;
 }
 
 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 {
-	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	struct net_device *netdev = handle->kinfo.netdev;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+		netdev_warn(netdev, "already uninitialized\n");
+		return 0;
+	}
+
 	hns3_force_clear_all_rx_ring(handle);
 
 	ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4075,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 
 	hns3_store_coal(priv);
 
+	ret = hns3_nic_dealloc_vector_data(priv);
+	if (ret)
+		netdev_err(netdev, "dealloc vector error\n");
+
 	ret = hns3_uninit_all_ring(priv);
 	if (ret)
 		netdev_err(netdev, "uninit ring error\n");
 
-	/* it is cumbersome for hardware to pick-and-choose entries for deletion
-	 * from table space. Hence, for function reset software intervention is
-	 * required to delete the entries
-	 */
-	if (hns3_dev_ongoing_func_reset(ae_dev)) {
-		hns3_remove_hw_addr(netdev);
-		hns3_del_all_fd_rules(netdev, false);
-	}
+	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
 	return ret;
 }
@@ -3980,15 +4249,23 @@ static int __init hns3_init_module(void)
 
 	INIT_LIST_HEAD(&client.node);
 
+	hns3_dbg_register_debugfs(hns3_driver_name);
+
 	ret = hnae3_register_client(&client);
 	if (ret)
-		return ret;
+		goto err_reg_client;
 
 	ret = pci_register_driver(&hns3_driver);
 	if (ret)
-		hnae3_unregister_client(&client);
+		goto err_reg_driver;
 
 	return ret;
+
+err_reg_driver:
+	hnae3_unregister_client(&client);
+err_reg_client:
+	hns3_dbg_unregister_debugfs();
+	return ret;
 }
 module_init(hns3_init_module);
 
@@ -4000,6 +4277,7 @@ static void __exit hns3_exit_module(void)
 {
 	pci_unregister_driver(&hns3_driver);
 	hnae3_unregister_client(&client);
+	hns3_dbg_unregister_debugfs();
 }
 module_exit(hns3_exit_module);
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d0..4e4b48b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
 enum hns3_nic_state {
 	HNS3_NIC_STATE_TESTING,
 	HNS3_NIC_STATE_RESETTING,
-	HNS3_NIC_STATE_REINITING,
+	HNS3_NIC_STATE_INITED,
 	HNS3_NIC_STATE_DOWN,
 	HNS3_NIC_STATE_DISABLED,
 	HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
 #define HNS3_RING_PREFETCH_EN_REG		0x0007C
 #define HNS3_RING_CFG_VF_NUM_REG		0x00080
 #define HNS3_RING_ASID_REG			0x0008C
-#define HNS3_RING_RX_VM_REG			0x00090
+#define HNS3_RING_EN_REG			0x00090
 #define HNS3_RING_T0_BE_RST			0x00094
 #define HNS3_RING_COULD_BE_RST			0x00098
 #define HNS3_RING_WRR_WEIGHT_REG		0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
 #define HNS3_RING_MAX_PENDING			32768
 #define HNS3_RING_MIN_PENDING			8
 #define HNS3_RING_BD_MULTIPLE			8
-#define HNS3_MAX_MTU				9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME			9728
+#define HNS3_MAX_MTU \
+	(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
 
 #define HNS3_BD_SIZE_512_TYPE			0
 #define HNS3_BD_SIZE_1024_TYPE			1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
 #define HNS3_RXD_DOI_B				21
 #define HNS3_RXD_OL3E_B				22
 #define HNS3_RXD_OL4E_B				23
+#define HNS3_RXD_GRO_COUNT_S			24
+#define HNS3_RXD_GRO_COUNT_M			(0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B			30
+#define HNS3_RXD_GRO_ECN_B			31
 
 #define HNS3_RXD_ODMAC_S			0
 #define HNS3_RXD_ODMAC_M			(0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
 #define HNS3_RXD_TSIND_S			12
 #define HNS3_RXD_TSIND_M			(0x7 << HNS3_RXD_TSIND_S)
 #define HNS3_RXD_LKBK_B				15
-#define HNS3_RXD_HDL_S				16
-#define HNS3_RXD_HDL_M				(0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B			31
+#define HNS3_RXD_GRO_SIZE_S			16
+#define HNS3_RXD_GRO_SIZE_M			(0x3ff << HNS3_RXD_GRO_SIZE_S)
 
 #define HNS3_TXD_L3T_S				0
 #define HNS3_TXD_L3T_M				(0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_RL_OFFSET			0x900
 #define HNS3_VECTOR_RL_EN_B			6
 
+#define HNS3_RING_EN_B				0
+
 enum hns3_pkt_l3t_type {
 	HNS3_L3T_NONE,
 	HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
 	 */
 	int next_to_clean;
 
+	int pull_len; /* head length for current packet */
+	u32 frag_num;
+	unsigned char *va; /* first buffer address for current packet */
+
 	u32 flag;          /* ring attribute */
 	int irq_init_flag;
 
 	int numa_node;
 	cpumask_t affinity_mask;
+
+	int pending_buf;
+	struct sk_buff *skb;
+	struct sk_buff *tail_skb;
 };
 
 struct hns_queue;
@@ -577,6 +593,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
 	return ring->next_to_use == ring->next_to_clean;
 }
 
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
 static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +607,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
 {
-	return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+	return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+			   ae_dev->reset_type == HNAE3_FLR_RESET ||
+			   ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+			   ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+			   ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+	hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
 }
 
 #define hns3_write_dev(a, reg, value) \
@@ -648,4 +683,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
 static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
 #endif
 
+void hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
 #endif
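A quick sanity check on the new macro above: assuming the standard kernel values ETH_HLEN = 14,
ETH_FCS_LEN = 4 and VLAN_HLEN = 4, HNS3_MAX_MTU works out to 9728 - (14 + 4 + 2 * 4) = 9702,
which matches the updated "(ETH_MIN_MTU(kernel default) - 9702)" comment in hns3_client_init().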
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2..4563638 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
 	int test_index = 0;
 	u32 i;
 
+	if (hns3_nic_resetting(ndev)) {
+		netdev_err(ndev, "dev resetting!");
+		return;
+	}
+
 	/* Only do offline selftest, or pass by default */
 	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
 		return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int queue_num = h->kinfo.num_tqps;
 
+	if (hns3_nic_resetting(netdev)) {
+		netdev_err(netdev, "dev resetting!");
+		return;
+	}
+
 	param->tx_max_pending = HNS3_RING_MAX_PENDING;
 	param->rx_max_pending = HNS3_RING_MAX_PENDING;
 
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
 	u32 old_desc_num, new_desc_num;
 	int ret;
 
+	if (hns3_nic_resetting(ndev))
+		return -EBUSY;
+
 	if (param->rx_mini_pending || param->rx_jumbo_pending)
 		return -EINVAL;
 
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 	struct hnae3_handle *h = priv->ae_handle;
 	u16 queue_num = h->kinfo.num_tqps;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	if (queue >= queue_num) {
 		netdev_err(netdev,
 			   "Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
 	int ret;
 	int i;
 
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
 	ret = hns3_check_coalesce_para(netdev, cmd);
 	if (ret)
 		return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 580e817..fffe8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
 ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
 
 hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62e..8af0cef 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	hdev->hw.cmq.crq.next_to_use = 0;
 
 	hclge_cmd_init_regs(&hdev->hw);
-	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 
 	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
 	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
+	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is a new reset pending, because a higher-level
+	 * reset may happen while a lower-level reset is being processed.
+	 */
+	if (hclge_is_reset_pending(hdev)) {
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		return -EBUSY;
+	}
+
 	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4b..2b90410 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -126,6 +126,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_TM_PRI_SCH_MODE_CFG   = 0x0813,
 	HCLGE_OPC_TM_QS_SCH_MODE_CFG    = 0x0814,
 	HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+	HCLGE_OPC_ETS_TC_WEIGHT		= 0x0843,
 
 	/* Packet buffer allocate commands */
 	HCLGE_OPC_TX_BUFF_ALLOC		= 0x0901,
@@ -152,6 +153,7 @@ enum hclge_opcode_type {
 
 	/* TSO command */
 	HCLGE_OPC_TSO_GENERIC_CONFIG	= 0x0C01,
+	HCLGE_OPC_GRO_GENERIC_CONFIG    = 0x0C10,
 
 	/* RSS commands */
 	HCLGE_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
@@ -758,6 +760,12 @@ struct hclge_cfg_tso_status_cmd {
 	u8 rsv[20];
 };
 
+#define HCLGE_GRO_EN_B		0
+struct hclge_cfg_gro_status_cmd {
+	__le16 gro_en;
+	u8 rsv[22];
+};
+
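The command layout above pairs with the new HCLGE_OPC_GRO_GENERIC_CONFIG opcode. As a hedged
sketch only (the actual hclge set_gro_en() implementation is outside this hunk), a caller could
drive it with the descriptor helpers used elsewhere in this patch; passing false to
hclge_cmd_setup_basic_desc() to mark a write access is an assumption here:

static int hclge_gro_en_sketch(struct hclge_dev *hdev, bool enable)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;

	/* build a write descriptor for the GRO config opcode */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
	req->gro_en = cpu_to_le16(enable ? BIT(HCLGE_GRO_EN_B) : 0);

	/* hand the single descriptor to firmware */
	return hclge_cmd_send(&hdev->hw, &desc, 1);
}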
 #define HCLGE_TSO_MSS_MIN	256
 #define HCLGE_TSO_MSS_MAX	9668
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e72f724..f6323b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
 		}
 	}
 
-	return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+	return 0;
 }
 
 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
@@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
 	return 0;
 }
 
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+				     u8 *prio_tc)
+{
+	int i;
+
+	if (num_tc > hdev->tc_max) {
+		dev_err(&hdev->pdev->dev,
+			"tc num checking failed, %u > tc_max(%u)\n",
+			num_tc, hdev->tc_max);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+		if (prio_tc[i] >= num_tc) {
+			dev_err(&hdev->pdev->dev,
+				"prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+				i, prio_tc[i], num_tc);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		if (num_tc > hdev->vport[i].alloc_tqps) {
+			dev_err(&hdev->pdev->dev,
+				"allocated tqp(%u) checking failed, %u > tqp(%u)\n",
+				i, num_tc, hdev->vport[i].alloc_tqps);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
 			      u8 *tc, bool *changed)
 {
 	bool has_ets_tc = false;
 	u32 total_ets_bw = 0;
 	u8 max_tc = 0;
+	int ret;
 	u8 i;
 
-	for (i = 0; i < HNAE3_MAX_TC; i++) {
-		if (ets->prio_tc[i] >= hdev->tc_max ||
-		    i >= hdev->tc_max)
-			return -EINVAL;
-
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
 		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
 			*changed = true;
 
 		if (ets->prio_tc[i] > max_tc)
 			max_tc = ets->prio_tc[i];
+	}
 
+	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
 			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 	if (ret)
 		return ret;
 
-	ret = hclge_tm_schd_info_update(hdev, num_tc);
-	if (ret)
-		return ret;
+	hclge_tm_schd_info_update(hdev, num_tc);
 
 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
 	if (ret)
@@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
 	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
 		return -EINVAL;
 
-	if (tc > hdev->tc_max) {
-		dev_err(&hdev->pdev->dev,
-			"setup tc failed, tc(%u) > tc_max(%u)\n",
-			tc, hdev->tc_max);
+	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
+	if (ret)
 		return -EINVAL;
-	}
 
-	ret = hclge_tm_schd_info_update(hdev, tc);
-	if (ret)
-		return ret;
-
-	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
-	if (ret)
-		return ret;
+	hclge_tm_schd_info_update(hdev, tc);
+	hclge_tm_prio_tc_info_update(hdev, prio_tc);
 
 	ret = hclge_tm_init_hw(hdev);
 	if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 0000000..14577bb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
+				  char *title_buf, char *true_buf,
+				  char *false_buf)
+{
+	if (flag)
+		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+			 true_buf);
+	else
+		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+			 false_buf);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+	struct hclge_ets_tc_weight_cmd *ets_weight;
+	struct hclge_desc desc;
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+		return;
+	}
+
+	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+	dev_info(&hdev->pdev->dev, "dump tc\n");
+	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+		 ets_weight->weight_offset);
+
+	for (i = 0; i < HNAE3_MAX_TC; i++)
+		hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
+				      "tc", "no sp mode", "sp mode");
+}
+
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+		 pg_shap_cfg_cmd->pg_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+		 pg_shap_cfg_cmd->pg_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+		 port_shap_cfg_cmd->port_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+		 bp_to_qs_map_cmd->tc_id);
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+		 bp_to_qs_map_cmd->qs_group_id);
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+		 bp_to_qs_map_cmd->qs_bit_map);
+	return;
+
+err_tm_pg_cmd_send:
+	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+		cmd, ret);
+}
+
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+	struct hclge_priority_weight_cmd *priority_weight;
+	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+	struct hclge_pg_weight_cmd *pg_weight;
+	struct hclge_qs_weight_cmd *qs_weight;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "dump tm\n");
+	dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
+		 pg_to_pri_map->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+		 pg_to_pri_map->pri_bit_map);
+
+	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+		 qs_to_pri_map->qs_id);
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+		 qs_to_pri_map->priority);
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+		 qs_to_pri_map->link_vld);
+
+	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+		 nq_to_qs_map->qset_id);
+
+	cmd = HCLGE_OPC_TM_PG_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_QS_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+		 shap_cfg_cmd->pri_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+		 shap_cfg_cmd->pri_shapping_para);
+
+	hclge_dbg_dump_tm_pg(hdev);
+
+	return;
+
+err_tm_cmd_send:
+	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+		cmd, ret);
+}
+
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+			ret);
+		return;
+	}
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+		 pause_param->pause_trans_gap);
+	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+		 pause_param->pause_trans_time);
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+	struct hclge_qos_pri_map_cmd *pri_map;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"dump qos pri map fail, status is %d.\n", ret);
+		return;
+	}
+
+	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
+	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
+	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
+	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
+	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
+	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
+	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
+	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
+	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+	struct hclge_rx_priv_wl_buf *rx_priv_wl;
+	struct hclge_rx_com_wl *rx_packet_cnt;
+	struct hclge_rx_com_thrd *rx_com_thrd;
+	struct hclge_rx_com_wl *rx_com_wl;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc[2];
+	int i, ret;
+
+	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+	hclge_cmd_setup_basic_desc(desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+
+	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+	for (i = 0; i < HCLGE_TC_NUM; i++)
+		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
+			 tx_buf_cmd->tx_pkt_buff[i]);
+
+	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+	hclge_cmd_setup_basic_desc(desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "\n");
+	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+	for (i = 0; i < HCLGE_TC_NUM; i++)
+		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
+			 rx_buf_cmd->buf_num[i]);
+
+	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
+		 rx_buf_cmd->shared_buf);
+
+	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 2);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "\n");
+	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+		dev_info(&hdev->pdev->dev,
+			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+		dev_info(&hdev->pdev->dev,
+			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 2);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "\n");
+	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+		dev_info(&hdev->pdev->dev,
+			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+			 rx_com_thrd->com_thrd[i].high,
+			 rx_com_thrd->com_thrd[i].low);
+
+	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+		dev_info(&hdev->pdev->dev,
+			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+			 rx_com_thrd->com_thrd[i].high,
+			 rx_com_thrd->com_thrd[i].low);
+
+	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+	hclge_cmd_setup_basic_desc(desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+	dev_info(&hdev->pdev->dev, "\n");
+	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+		 rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+	hclge_cmd_setup_basic_desc(desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+	dev_info(&hdev->pdev->dev,
+		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+		 rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+
+	return;
+
+err_qos_cmd_send:
+	dev_err(&hdev->pdev->dev,
+		"dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+}
+
+static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+				   bool sel_x, u32 loc)
+{
+	struct hclge_fd_tcam_config_1_cmd *req1;
+	struct hclge_fd_tcam_config_2_cmd *req2;
+	struct hclge_fd_tcam_config_3_cmd *req3;
+	struct hclge_desc desc[3];
+	int ret, i;
+	u32 *req;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+	req1->stage  = stage;
+	req1->xy_sel = sel_x ? 1 : 0;
+	req1->index  = cpu_to_le32(loc);
+
+	ret = hclge_cmd_send(&hdev->hw, desc, 3);
+	if (ret)
+		return;
+
+	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
+		 sel_x ? "x" : "y", loc);
+
+	req = (u32 *)req1->tcam_data;
+	for (i = 0; i < 2; i++)
+		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+	req = (u32 *)req2->tcam_data;
+	for (i = 0; i < 6; i++)
+		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+	req = (u32 *)req3->tcam_data;
+	for (i = 0; i < 5; i++)
+		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+}
+
+static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+{
+	u32 i;
+
+	for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
+		hclge_dbg_fd_tcam_read(hdev, 0, true, i);
+		hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+	}
+}
+
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
+		hclge_dbg_fd_tcam(hdev);
+	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
+		hclge_dbg_dump_tc(hdev);
+	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
+		hclge_dbg_dump_tm(hdev);
+	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
+		hclge_dbg_dump_qos_pause_cfg(hdev);
+	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
+		hclge_dbg_dump_qos_pri_map(hdev);
+	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
+		hclge_dbg_dump_qos_buf_cfg(hdev);
+	} else {
+		dev_info(&hdev->pdev->dev, "unknown command\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 0000000..50fd0b1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#pragma pack(1)
+
+struct hclge_qos_pri_map_cmd {
+	u8 pri0_tc  : 4,
+	   pri1_tc  : 4;
+	u8 pri2_tc  : 4,
+	   pri3_tc  : 4;
+	u8 pri4_tc  : 4,
+	   pri5_tc  : 4;
+	u8 pri6_tc  : 4,
+	   pri7_tc  : 4;
+	u8 vlan_pri : 4,
+	   rev	    : 4;
+};
+
+#pragma pack()
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e..6da9e22 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -818,7 +818,6 @@ static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
 static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
 					 enum hclge_err_int_type int_type)
 {
-	enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
 	struct device *dev = &hdev->pdev->dev;
 	const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
 	struct hclge_desc desc[2];
@@ -848,23 +847,17 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
 	}
 
 	err_sts = le32_to_cpu(desc[0].data[2]);
-	if (err_sts) {
+	if (err_sts)
 		hclge_log_error(dev, hw_err_lst1, err_sts);
-		reset_level = HNAE3_FUNC_RESET;
-	}
 
 	err_sts = le32_to_cpu(desc[0].data[3]);
-	if (err_sts) {
+	if (err_sts)
 		hclge_log_error(dev, hw_err_lst2, err_sts);
-		reset_level = HNAE3_FUNC_RESET;
-	}
 
 	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
 		err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
-		if (err_sts) {
+		if (err_sts)
 			hclge_log_error(dev, hw_err_lst3, err_sts);
-			reset_level = HNAE3_FUNC_RESET;
-		}
 	}
 
 	/* clear PPP INT */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd960..696cb53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,7 @@
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
 
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+	struct hclge_cfg_gro_status_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	if (!hnae3_dev_gro_supported(hdev))
+		return 0;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+	req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"GRO hardware config cmd failed, ret = %d\n", ret);
+
+	return ret;
+}
+
 static int hclge_alloc_tqps(struct hclge_dev *hdev)
 {
 	struct hclge_tqp *tqp;
@@ -1144,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 	for (i = 0; i < num_vport; i++) {
 		vport->back = hdev;
 		vport->vport_id = i;
+		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 
 		if (i == 0)
 			ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1947,10 +1970,7 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
-	struct hnae3_handle *handle = &hdev->vport[0].nic;
-	struct net_device *netdev = handle->kinfo.netdev;
 	struct hclge_mac *mac = &hdev->hw.mac;
-	int mtu;
 	int ret;
 
 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
@@ -1964,15 +1984,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
 	mac->link = 0;
 
-	if (netdev)
-		mtu = netdev->mtu;
-	else
-		mtu = ETH_DATA_LEN;
+	ret = hclge_set_mac_mtu(hdev, hdev->mps);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+		return ret;
+	}
 
-	ret = hclge_set_mtu(handle, mtu);
+	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"set mtu failed ret=%d\n", ret);
+			"allocate buffer fail, ret=%d\n", ret);
 
 	return ret;
 }
@@ -2144,7 +2165,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	 */
 
 	/* check for vector0 reset event sources */
+	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,18 +2182,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	}
 
 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
-	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
-		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
-		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
-		return HCLGE_VECTOR0_EVENT_RST;
-	}
-
 	/* check for vector0 mailbox(=CMDQ RX) event source */
 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
@@ -2308,21 +2333,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
 		int ret;
 
 		ret = client->ops->reset_notify(handle, type);
-		if (ret)
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"notify nic client failed %d(%d)\n", type, ret);
 			return ret;
+		}
 	}
 
 	return 0;
 }
 
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+				    enum hnae3_reset_notify_type type)
+{
+	struct hnae3_client *client = hdev->roce_client;
+	int ret = 0;
+	u16 i;
+
+	if (!client)
+		return 0;
+
+	if (!client->ops->reset_notify)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+		struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+		ret = client->ops->reset_notify(handle, type);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"notify roce client failed %d(%d)",
+				type, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static int hclge_reset_wait(struct hclge_dev *hdev)
 {
 #define HCLGE_RESET_WATI_MS	100
-#define HCLGE_RESET_WAIT_CNT	5
+#define HCLGE_RESET_WAIT_CNT	200
 	u32 val, reg, reg_bit;
 	u32 cnt = 0;
 
 	switch (hdev->reset_type) {
+	case HNAE3_IMP_RESET:
+		reg = HCLGE_GLOBAL_RESET_REG;
+		reg_bit = HCLGE_IMP_RESET_BIT;
+		break;
 	case HNAE3_GLOBAL_RESET:
 		reg = HCLGE_GLOBAL_RESET_REG;
 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2395,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		reg = HCLGE_FUN_RST_ING;
 		reg_bit = HCLGE_FUN_RST_ING_B;
 		break;
+	case HNAE3_FLR_RESET:
+		break;
 	default:
 		dev_err(&hdev->pdev->dev,
 			"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2404,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		return -EINVAL;
 	}
 
+	if (hdev->reset_type == HNAE3_FLR_RESET) {
+		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+		       cnt++ < HCLGE_RESET_WAIT_CNT)
+			msleep(HCLGE_RESET_WATI_MS);
+
+		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+			dev_err(&hdev->pdev->dev,
+				"flr wait timeout: %d\n", cnt);
+			return -EBUSY;
+		}
+
+		return 0;
+	}
+
 	val = hclge_read_dev(&hdev->hw, reg);
 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
 		msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2434,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 	return 0;
 }
 
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+	struct hclge_vf_rst_cmd *req;
+	struct hclge_desc desc;
+
+	req = (struct hclge_vf_rst_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+	req->dest_vfid = func_id;
+
+	if (reset)
+		req->vf_rst = 0x1;
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+	int i;
+
+	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+		int ret;
+
+		/* Send cmd to set/clear VF's FUNC_RST_ING */
+		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"set vf(%d) rst failed %d!\n",
+				vport->vport_id, ret);
+			return ret;
+		}
+
+		if (!reset)
+			continue;
+
+		/* Inform VF to process the reset.
+		 * hclge_inform_reset_assert_to_vf may fail if VF
+		 * driver is not loaded.
+		 */
+		ret = hclge_inform_reset_assert_to_vf(vport);
+		if (ret)
+			dev_warn(&hdev->pdev->dev,
+				 "inform reset to vf(%d) failed %d!\n",
+				 vport->vport_id, ret);
+	}
+
+	return 0;
+}
+
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 {
 	struct hclge_desc desc;
@@ -2396,11 +2521,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 		break;
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
-		hclge_func_reset_cmd(hdev, 0);
 		/* schedule again to check later */
 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
 		hclge_reset_task_schedule(hdev);
 		break;
+	case HNAE3_FLR_RESET:
+		dev_info(&pdev->dev, "FLR requested\n");
+		/* schedule again to check later */
+		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
+		break;
 	default:
 		dev_warn(&pdev->dev,
 			 "Unsupported reset type: %d\n", hdev->reset_type);
@@ -2414,20 +2544,28 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
 
 	/* return the highest priority reset level amongst all */
-	if (test_bit(HNAE3_GLOBAL_RESET, addr))
-		rst_level = HNAE3_GLOBAL_RESET;
-	else if (test_bit(HNAE3_CORE_RESET, addr))
-		rst_level = HNAE3_CORE_RESET;
-	else if (test_bit(HNAE3_IMP_RESET, addr))
+	if (test_bit(HNAE3_IMP_RESET, addr)) {
 		rst_level = HNAE3_IMP_RESET;
-	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		clear_bit(HNAE3_IMP_RESET, addr);
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
+		rst_level = HNAE3_GLOBAL_RESET;
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
+		rst_level = HNAE3_CORE_RESET;
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
 		rst_level = HNAE3_FUNC_RESET;
-
-	/* now, clear all other resets */
-	clear_bit(HNAE3_GLOBAL_RESET, addr);
-	clear_bit(HNAE3_CORE_RESET, addr);
-	clear_bit(HNAE3_IMP_RESET, addr);
-	clear_bit(HNAE3_FUNC_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
+		rst_level = HNAE3_FLR_RESET;
+		clear_bit(HNAE3_FLR_RESET, addr);
+	}
 
 	return rst_level;
 }
@@ -2457,39 +2595,206 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 }
 
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* fall through */
+	case HNAE3_FLR_RESET:
+		ret = hclge_set_all_vf_rst(hdev, true);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+	u32 reg_val;
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* There is no mechanism for the PF to know whether the VF has
+		 * stopped IO; for now, just wait 100 ms for the VF to stop IO.
+		 */
+		msleep(100);
+		ret = hclge_func_reset_cmd(hdev, 0);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"asserting function reset fail %d!\n", ret);
+			return ret;
+		}
+
+		/* After performing PF reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hclge_cmd_init is called.
+		 */
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		break;
+	case HNAE3_FLR_RESET:
+		/* There is no mechanism for the PF to know whether the VF has
+		 * stopped IO; for now, just wait 100 ms for the VF to stop IO.
+		 */
+		msleep(100);
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+		break;
+	case HNAE3_IMP_RESET:
+		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+		break;
+	default:
+		break;
+	}
+
+	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+	return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+	if (hdev->reset_pending) {
+		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+			 hdev->reset_pending);
+		return true;
+	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+		    BIT(HCLGE_IMP_RESET_BIT))) {
+		dev_info(&hdev->pdev->dev,
+			 "reset failed because IMP Reset is pending\n");
+		hclge_clear_reset_cause(hdev);
+		return false;
+	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+		hdev->reset_fail_cnt++;
+		if (is_timeout) {
+			set_bit(hdev->reset_type, &hdev->reset_pending);
+			dev_info(&hdev->pdev->dev,
+				 "re-schedule to wait for hw reset done\n");
+			return true;
+		}
+
+		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+		hclge_clear_reset_cause(hdev);
+		mod_timer(&hdev->reset_timer,
+			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+		return false;
+	}
+
+	hclge_clear_reset_cause(hdev);
+	dev_err(&hdev->pdev->dev, "Reset fail!\n");
+	return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* fall through */
+	case HNAE3_FLR_RESET:
+		ret = hclge_set_all_vf_rst(hdev, false);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
 static void hclge_reset(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-	struct hnae3_handle *handle;
+	bool is_timeout = false;
+	int ret;
 
 	/* Initialize ae_dev reset status as well, in case enet layer wants to
 	 * know if device is undergoing reset
 	 */
 	ae_dev->reset_type = hdev->reset_type;
+	hdev->reset_count++;
+	hdev->last_reset_time = jiffies;
 	/* perform reset of the stack & ae device for a client */
-	handle = &hdev->vport[0].nic;
+	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	ret = hclge_reset_prepare_down(hdev);
+	if (ret)
+		goto err_reset;
+
 	rtnl_lock();
-	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
 	rtnl_unlock();
 
-	if (!hclge_reset_wait(hdev)) {
-		rtnl_lock();
-		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-		hclge_reset_ae_dev(hdev->ae_dev);
-		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	ret = hclge_reset_prepare_wait(hdev);
+	if (ret)
+		goto err_reset;
 
-		hclge_clear_reset_cause(hdev);
-	} else {
-		rtnl_lock();
-		/* schedule again to check pending resets later */
-		set_bit(hdev->reset_type, &hdev->reset_pending);
-		hclge_reset_task_schedule(hdev);
+	if (hclge_reset_wait(hdev)) {
+		is_timeout = true;
+		goto err_reset;
 	}
 
-	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-	handle->last_reset_time = jiffies;
+	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	rtnl_lock();
+	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_reset_ae_dev(hdev->ae_dev);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	hclge_clear_reset_cause(hdev);
+
+	ret = hclge_reset_prepare_up(hdev);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
 	rtnl_unlock();
-	ae_dev->reset_type = HNAE3_NONE_RESET;
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	return;
+
+err_reset_lock:
+	rtnl_unlock();
+err_reset:
+	if (hclge_reset_err_handle(hdev, is_timeout))
+		hclge_reset_task_schedule(hdev);
 }
 
 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
@@ -2515,20 +2820,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
 	if (!handle)
 		handle = &hdev->vport[0].nic;
 
-	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
 		return;
-	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
-		handle->reset_level = HNAE3_FUNC_RESET;
+	else if (hdev->default_reset_request)
+		hdev->reset_level =
+			hclge_get_reset_level(hdev,
+					      &hdev->default_reset_request);
+	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+		hdev->reset_level = HNAE3_FUNC_RESET;
 
 	dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
-		 handle->reset_level);
+		 hdev->reset_level);
 
 	/* request reset & schedule reset task */
-	set_bit(handle->reset_level, &hdev->reset_request);
+	set_bit(hdev->reset_level, &hdev->reset_request);
 	hclge_reset_task_schedule(hdev);
 
-	if (handle->reset_level < HNAE3_GLOBAL_RESET)
-		handle->reset_level++;
+	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+		hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+					enum hnae3_reset_type rst_type)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+	dev_info(&hdev->pdev->dev,
+		 "triggering global reset in reset timer\n");
+	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+	hclge_reset_event(hdev->pdev, NULL);
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2869,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
 	 *    b. else, we can come back later to check this status so re-sched
 	 *       now.
 	 */
+	hdev->last_reset_time = jiffies;
 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
 	if (hdev->reset_type != HNAE3_NONE_RESET)
 		hclge_reset(hdev);
@@ -2584,6 +2912,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
 
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+	int i;
+
+	/* start from vport 1 because the PF (vport 0) is always alive */
+	for (i = 1; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+
+		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+		/* If vf is not alive, set to default value */
+		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+	}
+}
+
 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
@@ -2596,6 +2941,7 @@ static void hclge_service_task(struct work_struct *work)
 
 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
+	hclge_update_vport_alive(hdev);
 	hclge_service_complete(hdev);
 }
 
@@ -4336,8 +4682,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 	struct hlist_node *node;
 	int ret;
 
+	/* Return ok here, because reset error handling will check this
+	 * return value. If an error is returned here, the reset process will
+	 * fail.
+	 */
 	if (!hnae3_dev_fd_supported(hdev))
-		return -EOPNOTSUPP;
+		return 0;
 
 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +4942,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
 	return 0;
 }
 
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hdev->reset_count;
+}
+
 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4805,10 +5180,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	int i;
-
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, true);
 
 	/* mac enable */
 	hclge_cfg_mac_mode(hdev, true);
@@ -4828,7 +5199,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	int i;
 
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
 
@@ -4836,14 +5206,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	cancel_work_sync(&hdev->service_task);
 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 
-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+	/* If it is not PF reset, the firmware will disable the MAC,
+	 * so it only needs to stop the PHY here.
+	 */
+	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+	    hdev->reset_type != HNAE3_FUNC_RESET) {
 		hclge_mac_stop_phy(hdev);
 		return;
 	}
 
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, false);
-
 	/* Mac disable */
 	hclge_cfg_mac_mode(hdev, false);
 
@@ -4856,6 +5227,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	hclge_update_link_status(hdev);
 }
 
+int hclge_vport_start(struct hclge_vport *vport)
+{
+	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+	vport->last_active_jiffies = jiffies;
+	return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	hclge_vport_stop(vport);
+}
+
 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
 					 u16 cmdq_resp, u8  resp_code,
 					 enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6400,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 	return hclge_set_vlan_rx_offload_cfg(vport);
 }
 
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 {
 	struct hclge_config_max_frm_size_cmd *req;
 	struct hclge_desc desc;
-	int max_frm_size;
-	int ret;
-
-	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
-	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
-	    max_frm_size > HCLGE_MAC_MAX_FRAME)
-		return -EINVAL;
-
-	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
-	req->max_frm_size = cpu_to_le16(max_frm_size);
+	req->max_frm_size = cpu_to_le16(new_mps);
 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
 
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
-	else
-		hdev->mps = max_frm_size;
-
-	return ret;
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-	int ret;
 
-	ret = hclge_set_mac_mtu(hdev, new_mtu);
+	return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
+	struct hclge_dev *hdev = vport->back;
+	int i, max_frm_size, ret = 0;
+
+	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+	    max_frm_size > HCLGE_MAC_MAX_FRAME)
+		return -EINVAL;
+
+	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+	mutex_lock(&hdev->vport_lock);
+	/* VF's mps must fit within hdev->mps */
+	if (vport->vport_id && max_frm_size > hdev->mps) {
+		mutex_unlock(&hdev->vport_lock);
+		return -EINVAL;
+	} else if (vport->vport_id) {
+		vport->mps = max_frm_size;
+		mutex_unlock(&hdev->vport_lock);
+		return 0;
+	}
+
+	/* PF's mps must be greater than or equal to each VF's mps */
+	for (i = 1; i < hdev->num_alloc_vport; i++)
+		if (max_frm_size < hdev->vport[i].mps) {
+			mutex_unlock(&hdev->vport_lock);
+			return -EINVAL;
+		}
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+	ret = hclge_set_mac_mtu(hdev, max_frm_size);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Change mtu fail, ret =%d\n", ret);
-		return ret;
+		goto out;
 	}
 
+	hdev->mps = max_frm_size;
+	vport->mps = max_frm_size;
+
 	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"Allocate buffer fail, ret =%d\n", ret);
 
+out:
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	mutex_unlock(&hdev->vport_lock);
 	return ret;
 }
 
@@ -6250,7 +6669,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
 	if (!phydev->link || !phydev->autoneg)
 		return 0;
 
-	local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
 
 	if (phydev->pause)
 		remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7031,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 
 	if (hdev->service_timer.function)
 		del_timer_sync(&hdev->service_timer);
+	if (hdev->reset_timer.function)
+		del_timer_sync(&hdev->reset_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
 	if (hdev->rst_service_task.func)
@@ -6620,6 +7041,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 		cancel_work_sync(&hdev->mbx_service_task);
 }
 
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS	100
+#define HCLGE_FLR_WAIT_CNT	50
+	struct hclge_dev *hdev = ae_dev->priv;
+	int cnt = 0;
+
+	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+	hclge_reset_event(hdev->pdev, NULL);
+
+	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+	       cnt++ < HCLGE_FLR_WAIT_CNT)
+		msleep(HCLGE_FLR_WAIT_MS);
+
+	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+		dev_err(&hdev->pdev->dev,
+			"flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7084,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_level = HNAE3_FUNC_RESET;
 	ae_dev->priv = hdev;
+	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+	mutex_init(&hdev->vport_lock);
 
 	ret = hclge_pci_init(hdev);
 	if (ret) {
@@ -6727,6 +7180,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_mdiobus_unreg;
 	}
 
+	ret = hclge_config_gro(hdev, true);
+	if (ret)
+		goto err_mdiobus_unreg;
+
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6769,6 +7226,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7237,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 
 	hclge_state_init(hdev);
+	hdev->last_reset_time = jiffies;
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -6806,6 +7265,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
 }
 
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		hclge_vport_start(vport);
+		vport++;
+	}
+}
+
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclge_dev *hdev = ae_dev->priv;
@@ -6856,6 +7326,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = hclge_config_gro(hdev, true);
+	if (ret)
+		return ret;
+
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6887,6 +7361,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 	if (hclge_enable_tm_hw_error(hdev, true))
 		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
 
+	hclge_reset_vport_state(hdev);
+
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);
 
@@ -6913,6 +7389,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_destroy_cmd_queue(&hdev->hw);
 	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
+	mutex_destroy(&hdev->vport_lock);
 	ae_dev->priv = NULL;
 }
 
@@ -7272,9 +7749,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
 	}
 }
 
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hclge_config_gro(hdev, enable);
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
 	.init_ae_dev = hclge_init_ae_dev,
 	.uninit_ae_dev = hclge_uninit_ae_dev,
+	.flr_prepare = hclge_flr_prepare,
+	.flr_done = hclge_flr_done,
 	.init_client_instance = hclge_init_client_instance,
 	.uninit_client_instance = hclge_uninit_client_instance,
 	.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7772,8 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_loopback = hclge_set_loopback,
 	.start = hclge_ae_start,
 	.stop = hclge_ae_stop,
+	.client_start = hclge_client_start,
+	.client_stop = hclge_client_stop,
 	.get_status = hclge_get_status,
 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7810,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
 	.reset_event = hclge_reset_event,
+	.set_default_reset_request = hclge_set_def_reset_request,
 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
 	.set_channels = hclge_set_channels,
 	.get_channels = hclge_get_channels,
@@ -7336,7 +7826,12 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_fd_all_rules = hclge_get_all_rules,
 	.restore_fd_rules = hclge_restore_fd_entries,
 	.enable_fd = hclge_enable_fd,
+	.dbg_run_cmd = hclge_dbg_run_cmd,
 	.process_hw_error = hclge_process_ras_hw_error,
+	.get_hw_reset_stat = hclge_get_hw_reset_stat,
+	.ae_dev_resetting = hclge_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+	.set_gro_en = hclge_gro_en,
 };
 
 static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d92154..4122ad1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -97,11 +97,13 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)
 
 /* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG		0x20600
 #define HCLGE_MISC_RESET_STS_REG	0x20700
 #define HCLGE_MISC_VECTOR_INT_STS	0x20800
 #define HCLGE_GLOBAL_RESET_REG		0x20A00
 #define HCLGE_GLOBAL_RESET_BIT		0
 #define HCLGE_CORE_RESET_BIT		1
+#define HCLGE_IMP_RESET_BIT		2
 #define HCLGE_FUN_RST_ING		0x20C00
 #define HCLGE_FUN_RST_ING_B		0
 
@@ -115,8 +117,10 @@ enum HLCGE_PORT_TYPE {
 /* CMDQ register bits for RX event(=MBX event) */
 #define HCLGE_VECTOR0_RX_CMDQ_INT_B	1
 
+#define HCLGE_VECTOR0_IMP_RESET_INT_B	1
+
 #define HCLGE_MAC_DEFAULT_FRAME \
-	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
 #define HCLGE_MAC_MIN_FRAME		64
 #define HCLGE_MAC_MAX_FRAME		9728
 
@@ -593,10 +597,16 @@ struct hclge_dev {
 	struct hclge_misc_vector misc_vector;
 	struct hclge_hw_stats hw_stats;
 	unsigned long state;
+	unsigned long flr_state;
+	unsigned long last_reset_time;
 
 	enum hnae3_reset_type reset_type;
+	enum hnae3_reset_type reset_level;
+	unsigned long default_reset_request;
 	unsigned long reset_request;	/* reset has been requested */
 	unsigned long reset_pending;	/* client rst is pending to be served */
+	unsigned long reset_count;	/* the number of resets that have been done */
+	u32 reset_fail_cnt;
 	u32 fw_version;
 	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
 	u16 num_tqps;			/* Num task queue pairs of this PF */
@@ -644,6 +654,7 @@ struct hclge_dev {
 	unsigned long service_timer_period;
 	unsigned long service_timer_previous;
 	struct timer_list service_timer;
+	struct timer_list reset_timer;
 	struct work_struct service_task;
 	struct work_struct rst_service_task;
 	struct work_struct mbx_service_task;
@@ -667,6 +678,8 @@ struct hclge_dev {
 
 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
 	u32 mps; /* Max packet size */
+	/* vport_lock protects resources shared by the vports */
+	struct mutex vport_lock;
 
 	struct hclge_vlan_type_cfg vlan_type_cfg;
 
@@ -717,6 +730,11 @@ struct hclge_rss_tuple_cfg {
 	u8 ipv6_fragment_en;
 };
 
+enum HCLGE_VPORT_STATE {
+	HCLGE_VPORT_STATE_ALIVE,
+	HCLGE_VPORT_STATE_MAX
+};
+
 struct hclge_vport {
 	u16 alloc_tqps;	/* Allocated Tx/Rx queues */
 
@@ -742,6 +760,10 @@ struct hclge_vport {
 	struct hclge_dev *back;  /* Back reference to associated dev */
 	struct hnae3_handle nic;
 	struct hnae3_handle roce;
+
+	unsigned long state;
+	unsigned long last_active_jiffies;
+	u32 mps; /* Max packet size */
 };
 
 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +790,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
 	return tqp->index;
 }
 
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+	return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
 			  u16 vlan_id, bool is_kill);
@@ -777,9 +805,14 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
 
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
 void hclge_mbx_handler(struct hclge_dev *hdev);
 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
 int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022..e16a730 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
 	return status;
 }
 
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
 {
+	struct hclge_dev *hdev = vport->back;
+	enum hnae3_reset_type reset_type;
 	u8 msg_data[2];
 	u8 dest_vfid;
 
 	dest_vfid = (u8)vport->vport_id;
 
+	if (hdev->reset_type == HNAE3_FUNC_RESET)
+		reset_type = HNAE3_VF_PF_FUNC_RESET;
+	else if (hdev->reset_type == HNAE3_FLR_RESET)
+		reset_type = HNAE3_VF_FULL_RESET;
+	else
+		return -EINVAL;
+
+	memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
 	/* send this requested info to VF */
-	return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
 				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
 }
 
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
 	return status;
 }
 
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+			      bool gen_resp)
+{
+	bool alive = !!mbx_req->msg[2];
+	int ret = 0;
+
+	if (alive)
+		ret = hclge_vport_start(vport);
+	else
+		hclge_vport_stop(vport);
+
+	return ret;
+}
+
 static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
 			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
 			       bool gen_resp)
@@ -363,24 +389,28 @@ static void hclge_reset_vf(struct hclge_vport *vport,
 	int ret;
 
 	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
-		 mbx_req->mbx_src_vfid);
+		 vport->vport_id);
 
-	/* Acknowledge VF that PF is now about to assert the reset for the VF.
-	 * On receiving this message VF will get into pending state and will
-	 * start polling for the hardware reset completion status.
-	 */
-	ret = hclge_inform_reset_assert_to_vf(vport);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
-			ret, vport->vport_id);
-		return;
-	}
+	ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
 
-	dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
-		 mbx_req->mbx_src_vfid);
-	/* reset this virtual function */
-	hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	int ret;
+	u32 mtu;
+
+	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+	ret = hclge_set_vport_mtu(vport, mtu);
+
+	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
 }
 
 static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +490,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					"PF failed(%d) to config VF's VLAN\n",
 					ret);
 			break;
+		case HCLGE_MBX_SET_ALIVE:
+			ret = hclge_set_vf_alive(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to set VF's ALIVE\n",
+					ret);
+			break;
 		case HCLGE_MBX_GET_QINFO:
 			ret = hclge_get_vf_queue_info(vport, req, true);
 			if (ret)
@@ -487,6 +524,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		case HCLGE_MBX_RESET:
 			hclge_reset_vf(vport, req);
 			break;
+		case HCLGE_MBX_KEEP_ALIVE:
+			hclge_vf_keep_alive(vport, req);
+			break;
+		case HCLGE_MBX_SET_MTU:
+			ret = hclge_set_vf_mtu(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"VF fail(%d) to set mtu\n", ret);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 0301863..741cb3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
 {
 	struct net_device *netdev = hdev->vport[0].nic.netdev;
 	struct phy_device *phydev = hdev->hw.mac.phydev;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	int ret;
 
 	if (!phydev)
 		return 0;
 
-	phydev->supported &= ~SUPPORTED_FIBRE;
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
 
 	ret = phy_connect_direct(netdev, phydev,
 				 hclge_mac_adjust_link,
@@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
 		return ret;
 	}
 
-	phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit_array(phy_gbit_features_array,
+			       ARRAY_SIZE(phy_gbit_features_array),
+			       mask);
+	linkmode_and(phydev->supported, phydev->supported, mask);
 	phy_support_asym_pause(phydev);
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 494e562..00458da 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
 	return 0;
 }
 
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
 {
 	struct hclge_vport *vport = hdev->vport;
 	struct hnae3_knic_private_info *kinfo;
 	u32 i, k;
 
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-		if (prio_tc[i] >= hdev->tm_info.num_tc)
-			return -EINVAL;
 		hdev->tm_info.prio_tc[i] = prio_tc[i];
 
 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
@@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
 			kinfo->prio_tc[i] = prio_tc[i];
 		}
 	}
-	return 0;
 }
 
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 {
 	u8 i, bit_map = 0;
 
-	for (i = 0; i < hdev->num_alloc_vport; i++) {
-		if (num_tc > hdev->vport[i].alloc_tqps)
-			return -EINVAL;
-	}
-
 	hdev->tm_info.num_tc = num_tc;
 
 	for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 	hdev->hw_tc_map = bit_map;
 
 	hclge_tm_schd_info_init(hdev);
-
-	return 0;
 }
 
 int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 25eef13..9c6192c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -55,6 +55,12 @@ struct hclge_qs_weight_cmd {
 	u8 dwrr;
 };
 
+struct hclge_ets_tc_weight_cmd {
+	u8 tc_weight[HNAE3_MAX_TC];
+	u8 weight_offset;
+	u8 rsvd[15];
+};
+
 #define HCLGE_TM_SHAP_IR_B_MSK  GENMASK(7, 0)
 #define HCLGE_TM_SHAP_IR_B_LSH	0
 #define HCLGE_TM_SHAP_IR_U_MSK  GENMASK(11, 8)
@@ -131,8 +137,8 @@ struct hclge_port_shapping_cmd {
 int hclge_tm_schd_init(struct hclge_dev *hdev);
 int hclge_pause_setup_hw(struct hclge_dev *hdev);
 int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445..d5765c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
 	return false;
 }
 
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_dev *hdev = ring->dev;
+	struct hclgevf_hw *hw = &hdev->hw;
+	u32 reg_val;
+
+	if (ring->flag == HCLGEVF_TYPE_CSQ) {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	} else {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+	}
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_cmd_config_regs(&hw->cmq.csq);
+	hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
 static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
 	}
 }
 
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
-				  struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
 {
 	struct hclgevf_hw *hw = &hdev->hw;
-	int ring_type = ring->flag;
-	u32 reg_val;
+	struct hclgevf_cmq_ring *ring =
+		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
 	int ret;
 
-	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-	spin_lock_init(&ring->lock);
-	ring->next_to_clean = 0;
-	ring->next_to_use = 0;
 	ring->dev = hdev;
+	ring->flag = ring_type;
 
 	/* allocate CSQ/CRQ descriptor */
 	ret = hclgevf_alloc_cmd_desc(ring);
-	if (ret) {
+	if (ret)
 		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
 			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-		return ret;
-	}
 
-	/* initialize the hardware registers with csq/crq dma-address,
-	 * descriptor number, head & tail pointers
-	 */
-	switch (ring_type) {
-	case HCLGEVF_TYPE_CSQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
-		return 0;
-	case HCLGEVF_TYPE_CRQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-		return 0;
-	default:
-		return -EINVAL;
-	}
+	return ret;
 }
 
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+	if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+	    test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
@@ -282,55 +284,83 @@ static int  hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
 	return status;
 }
 
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
+{
+	int ret;
+
+	/* Setup the lock for command queue */
+	spin_lock_init(&hdev->hw.cmq.csq.lock);
+	spin_lock_init(&hdev->hw.cmq.crq.lock);
+
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"CSQ ring setup error %d\n", ret);
+		return ret;
+	}
+
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"CRQ ring setup error %d\n", ret);
+		goto err_csq;
+	}
+
+	return 0;
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	return ret;
+}
+
 int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 {
 	u32 version;
 	int ret;
 
-	/* setup Tx write back timeout */
-	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
-
-	/* setup queue CSQ/CRQ rings */
-	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CSQ ring\n", ret);
-		return ret;
-	}
-
-	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CRQ ring\n", ret);
-		goto err_csq;
-	}
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
 
 	/* initialize the pointers of async rx queue of mailbox */
 	hdev->arq.hdev = hdev;
 	hdev->arq.head = 0;
 	hdev->arq.tail = 0;
 	hdev->arq.count = 0;
+	hdev->hw.cmq.csq.next_to_clean = 0;
+	hdev->hw.cmq.csq.next_to_use = 0;
+	hdev->hw.cmq.crq.next_to_clean = 0;
+	hdev->hw.cmq.crq.next_to_use = 0;
+
+	hclgevf_cmd_init_regs(&hdev->hw);
+
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is a new reset pending, because a higher level
+	 * reset may happen while a lower level reset is being processed.
+	 */
+	if (hclgevf_is_reset_pending(hdev)) {
+		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+		return -EBUSY;
+	}
 
 	/* get firmware version */
 	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed(%d) to query firmware version\n", ret);
-		goto err_crq;
+		return ret;
 	}
 	hdev->fw_version = version;
 
 	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
 
 	return 0;
-err_crq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
-	return ret;
 }
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0..47030b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
 	HCLGEVF_OPC_QUERY_TX_STATUS	= 0x0B03,
 	HCLGEVF_OPC_QUERY_RX_STATUS	= 0x0B13,
 	HCLGEVF_OPC_CFG_COM_TQP_QUEUE	= 0x0B20,
+	/* GRO command */
+	HCLGEVF_OPC_GRO_GENERIC_CONFIG  = 0x0C10,
 	/* RSS cmd */
 	HCLGEVF_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
 	HCLGEVF_OPC_RSS_INPUT_TUPLE     = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
 	__le16 rsv[7];
 };
 
+#define HCLGEVF_GRO_EN_B               0
+struct hclgevf_cfg_gro_status_cmd {
+	__le16 gro_en;
+	u8 rsv[22];
+};
+
 #define HCLGEVF_RSS_DEFAULT_OUTPORT_B	4
 #define HCLGEVF_RSS_HASH_KEY_OFFSET_B	4
 #define HCLGEVF_RSS_HASH_KEY_NUM	16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
 
 int hclgevf_cmd_init(struct hclgevf_dev *hdev);
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
 
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb9..efec1b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #include <linux/etherdevice.h>
+#include <linux/iopoll.h>
 #include <net/rtnetlink.h>
 #include "hclgevf_cmd.h"
 #include "hclgevf_main.h"
@@ -10,8 +11,7 @@
 
 #define HCLGEVF_NAME	"hclgevf"
 
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
 static struct hnae3_ae_algo ae_algovf;
 
 static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 	struct hclgevf_tqp *tqp;
 	int i;
 
-	/* if this is on going reset then we need to re-allocate the TPQs
-	 * since we cannot assume we would get same number of TPQs back from PF
-	 */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
 				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
 	if (!hdev->htqp)
@@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
 	new_tqps = kinfo->rss_size * kinfo->num_tc;
 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
 
-	/* if this is on going reset then we need to re-allocate the hnae queues
-	 * as well since number of TPQs from PF might have changed.
-	 */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
 	if (!kinfo->tqp)
@@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector(
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int ret, vector_id;
 
+	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+		return 0;
+
 	vector_id = hclgevf_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
 		dev_err(&handle->pdev->dev,
@@ -956,13 +947,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
 	return status;
 }
 
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
-	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
-	return tqp->index;
-}
-
 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1081,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 				    2, true, NULL, 0);
 }
 
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+				    sizeof(new_mtu), true, NULL, 0);
+}
+
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
 				 enum hnae3_reset_notify_type type)
 {
 	struct hnae3_client *client = hdev->nic_client;
 	struct hnae3_handle *handle = &hdev->nic;
+	int ret;
 
 	if (!client->ops->reset_notify)
 		return -EOPNOTSUPP;
 
-	return client->ops->reset_notify(handle, type);
+	ret = client->ops->reset_notify(handle, type);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+			type, ret);
+
+	return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+				    unsigned long delay_us,
+				    unsigned long wait_cnt)
+{
+	unsigned long cnt = 0;
+
+	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+	       cnt++ < wait_cnt)
+		usleep_range(delay_us, delay_us * 2);
+
+	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+		dev_err(&hdev->pdev->dev,
+			"flr wait timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
 }
 
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
-#define HCLGEVF_RESET_WAIT_MS	500
-#define HCLGEVF_RESET_WAIT_CNT	20
-	u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US	20000
+#define HCLGEVF_RESET_WAIT_CNT	2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
+	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+	u32 val;
+	int ret;
 
 	/* wait to check the hardware reset completion status */
-	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
-	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
-		msleep(HCLGEVF_RESET_WAIT_MS);
-		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-		cnt++;
-	}
+	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+	if (hdev->reset_type == HNAE3_FLR_RESET)
+		return hclgevf_flr_poll_timeout(hdev,
+						HCLGEVF_RESET_WAIT_US,
+						HCLGEVF_RESET_WAIT_CNT);
+
+	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+				 !(val & HCLGEVF_RST_ING_BITS),
+				 HCLGEVF_RESET_WAIT_US,
+				 HCLGEVF_RESET_WAIT_TIMEOUT_US);
 
 	/* hardware completion status should be available by this time */
-	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
-		dev_warn(&hdev->pdev->dev,
-			 "could'nt get reset done status from h/w, timeout!\n");
-		return -EBUSY;
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"couldn't get reset done status from h/w, timeout!\n");
+		return ret;
 	}
 
 	/* we will wait a bit more to let reset of the stack to complete. This
@@ -1145,10 +1178,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
 	int ret;
 
 	/* uninitialize the nic client */
-	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		return ret;
 
 	/* re-initialize the hclge device */
-	ret = hclgevf_init_hdev(hdev);
+	ret = hclgevf_reset_hdev(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1191,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
 	}
 
 	/* bring up the nic client again */
-	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		return ret;
 
 	return 0;
 }
 
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_VF_FUNC_RESET:
+		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+					   0, true, NULL, sizeof(u8));
+		break;
+	case HNAE3_FLR_RESET:
+		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+		break;
+	default:
+		break;
+	}
+
+	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+		 hdev->reset_type, ret);
+
+	return ret;
+}
+
 static int hclgevf_reset(struct hclgevf_dev *hdev)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 	int ret;
 
+	/* Initialize ae_dev reset status as well, in case enet layer wants to
+	 * know if device is undergoing reset
+	 */
+	ae_dev->reset_type = hdev->reset_type;
+	hdev->reset_count++;
 	rtnl_lock();
 
 	/* bring down the nic to stop any ongoing TX/RX */
-	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset_lock;
 
 	rtnl_unlock();
 
+	ret = hclgevf_reset_prepare_wait(hdev);
+	if (ret)
+		goto err_reset;
+
 	/* check if VF could successfully fetch the hardware reset completion
 	 * status from the hardware
 	 */
@@ -1181,58 +1254,118 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 		dev_err(&hdev->pdev->dev,
 			"VF failed(=%d) to fetch H/W reset completion status\n",
 			ret);
-
-		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
-		rtnl_lock();
-		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
-		rtnl_unlock();
-		return ret;
+		goto err_reset;
 	}
 
 	rtnl_lock();
 
 	/* now, re-initialize the nic client and ae device*/
 	ret = hclgevf_reset_stack(hdev);
-	if (ret)
+	if (ret) {
 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+		goto err_reset_lock;
+	}
 
 	/* bring up the nic to enable TX/RX again */
-	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset_lock;
 
 	rtnl_unlock();
 
 	return ret;
+err_reset_lock:
+	rtnl_unlock();
+err_reset:
+	/* If the VF reset fails, only a higher level reset asserted by the
+	 * PF can recover it, so re-initialize the command queue to receive
+	 * that higher level reset event.
+	 */
+	hclgevf_cmd_init(hdev);
+	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
+
+	return ret;
 }
 
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+						     unsigned long *addr)
 {
-	int status;
-	u8 respmsg;
+	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
 
-	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
-				      0, false, &respmsg, sizeof(u8));
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"VF reset request to PF failed(=%d)\n", status);
+	/* return the highest priority reset level amongst all */
+	if (test_bit(HNAE3_VF_RESET, addr)) {
+		rst_level = HNAE3_VF_RESET;
+		clear_bit(HNAE3_VF_RESET, addr);
+		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+		clear_bit(HNAE3_VF_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+		rst_level = HNAE3_VF_FULL_RESET;
+		clear_bit(HNAE3_VF_FULL_RESET, addr);
+		clear_bit(HNAE3_VF_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+		rst_level = HNAE3_VF_PF_FUNC_RESET;
+		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+		clear_bit(HNAE3_VF_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+		rst_level = HNAE3_VF_FUNC_RESET;
+		clear_bit(HNAE3_VF_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
+		rst_level = HNAE3_FLR_RESET;
+		clear_bit(HNAE3_FLR_RESET, addr);
+	}
 
-	return status;
+	return rst_level;
 }
 
 static void hclgevf_reset_event(struct pci_dev *pdev,
 				struct hnae3_handle *handle)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+	struct hclgevf_dev *hdev = ae_dev->priv;
 
 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
 
-	handle->reset_level = HNAE3_VF_RESET;
+	if (hdev->default_reset_request)
+		hdev->reset_level =
+			hclgevf_get_reset_level(hdev,
+						&hdev->default_reset_request);
+	else
+		hdev->reset_level = HNAE3_VF_FUNC_RESET;
 
 	/* reset of this VF requested */
 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
 	hclgevf_reset_task_schedule(hdev);
 
-	handle->last_reset_time = jiffies;
+	hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+					  enum hnae3_reset_type rst_type)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS	100
+#define HCLGEVF_FLR_WAIT_CNT	50
+	struct hclgevf_dev *hdev = ae_dev->priv;
+	int cnt = 0;
+
+	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+	hclgevf_reset_event(hdev->pdev, NULL);
+
+	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
+		msleep(HCLGEVF_FLR_WAIT_MS);
+
+	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+		dev_err(&hdev->pdev->dev,
+			"flr wait down timeout: %d\n", cnt);
 }
 
 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1454,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 		 */
 		hdev->reset_attempts = 0;
 
-		ret = hclgevf_reset(hdev);
-		if (ret)
-			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+		hdev->last_reset_time = jiffies;
+		while ((hdev->reset_type =
+			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+		       != HNAE3_NONE_RESET) {
+			ret = hclgevf_reset(hdev);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"VF stack reset failed %d.\n", ret);
+		}
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
 		/* we could be here when either of below happens:
@@ -1352,19 +1491,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 		 */
 		if (hdev->reset_attempts > 3) {
 			/* prepare for full reset of stack + pcie interface */
-			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
 
 			/* "defer" schedule the reset task again */
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 		} else {
 			hdev->reset_attempts++;
 
-			/* request PF for resetting this VF via mailbox */
-			ret = hclgevf_do_reset(hdev);
-			if (ret)
-				dev_warn(&hdev->pdev->dev,
-					 "VF rst fail, stack will call\n");
+			set_bit(hdev->reset_level, &hdev->reset_pending);
+			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 		}
+		hclgevf_reset_task_schedule(hdev);
 	}
 
 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1523,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
 }
 
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+	schedule_work(&hdev->keep_alive_task);
+	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+	u8 respmsg;
+	int ret;
+
+	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+				   0, false, &respmsg, sizeof(u8));
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"VF failed to send keep alive cmd(=%d)\n", ret);
+}
+
 static void hclgevf_service_task(struct work_struct *work)
 {
 	struct hclgevf_dev *hdev;
@@ -1407,24 +1566,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
 	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
 }
 
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+						      u32 *clearval)
 {
-	u32 cmdq_src_reg;
+	u32 cmdq_src_reg, rst_ing_reg;
 
 	/* fetch the events from their corresponding regs */
 	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
 					HCLGEVF_VECTOR0_CMDQ_SRC_REG);
 
+	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+		dev_info(&hdev->pdev->dev,
+			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
+		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+		*clearval = cmdq_src_reg;
+		return HCLGEVF_VECTOR0_EVENT_RST;
+	}
+
 	/* check for vector0 mailbox(=CMDQ RX) event source */
 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
 		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
 		*clearval = cmdq_src_reg;
-		return true;
+		return HCLGEVF_VECTOR0_EVENT_MBX;
 	}
 
 	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
 
-	return false;
+	return HCLGEVF_VECTOR0_EVENT_OTHER;
 }
 
 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1606,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
 
 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 {
+	enum hclgevf_evt_cause event_cause;
 	struct hclgevf_dev *hdev = data;
 	u32 clearval;
 
 	hclgevf_enable_vector(&hdev->misc_vector, false);
-	if (!hclgevf_check_event_cause(hdev, &clearval))
-		goto skip_sched;
+	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
 
-	hclgevf_mbx_handler(hdev);
+	switch (event_cause) {
+	case HCLGEVF_VECTOR0_EVENT_RST:
+		hclgevf_reset_task_schedule(hdev);
+		break;
+	case HCLGEVF_VECTOR0_EVENT_MBX:
+		hclgevf_mbx_handler(hdev);
+		break;
+	default:
+		break;
+	}
 
-	hclgevf_clear_event_cause(hdev, clearval);
-
-skip_sched:
-	hclgevf_enable_vector(&hdev->misc_vector, true);
+	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+		hclgevf_clear_event_cause(hdev, clearval);
+		hclgevf_enable_vector(&hdev->misc_vector, true);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1504,6 +1685,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
 	return 0;
 }
 
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+	struct hclgevf_cfg_gro_status_cmd *req;
+	struct hclgevf_desc desc;
+	int ret;
+
+	if (!hnae3_dev_gro_supported(hdev))
+		return 0;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+				     false);
+	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+	req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+	return ret;
+}
+
 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1566,21 +1770,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 
 static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	int i, queue_id;
-
-	for (i = 0; i < kinfo->num_tqps; i++) {
-		/* ring enable */
-		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
-		if (queue_id < 0) {
-			dev_warn(&hdev->pdev->dev,
-				 "Get invalid queue id, ignore it\n");
-			continue;
-		}
-
-		hclgevf_tqp_enable(hdev, queue_id, 0, true);
-	}
 
 	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
@@ -1595,24 +1785,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 
 static void hclgevf_ae_stop(struct hnae3_handle *handle)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	int i, queue_id;
 
 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
 
-	for (i = 0; i < kinfo->num_tqps; i++) {
-		/* Ring disable */
-		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
-		if (queue_id < 0) {
-			dev_warn(&hdev->pdev->dev,
-				 "Get invalid queue id, ignore it\n");
-			continue;
-		}
-
-		hclgevf_tqp_enable(hdev, queue_id, 0, false);
-	}
-
 	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
 	del_timer_sync(&hdev->service_timer);
@@ -1621,12 +1797,40 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
 	hclgevf_update_link_status(hdev, 0);
 }
 
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data;
+
+	msg_data = alive ? 1 : 0;
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+				    0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret;
+
+	ret = hclgevf_set_alive(handle, false);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "%s failed %d\n", __func__, ret);
+
+	del_timer_sync(&hdev->keep_alive_timer);
+	cancel_work_sync(&hdev->keep_alive_task);
+}
+
 static void hclgevf_state_init(struct hclgevf_dev *hdev)
 {
-	/* if this is on going reset then skip this initialization */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		return;
-
 	/* setup tasks for the MBX */
 	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1872,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
 	int vectors;
 	int i;
 
-	/* if this is on going reset then skip this initialization */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		return 0;
-
 	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
 		vectors = pci_alloc_irq_vectors(pdev,
 						hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1910,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
 					sizeof(int), GFP_KERNEL);
 	if (!hdev->vector_irq) {
+		devm_kfree(&pdev->dev, hdev->vector_status);
 		pci_free_irq_vectors(pdev);
 		return -ENOMEM;
 	}
@@ -1721,6 +1922,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 
+	devm_kfree(&pdev->dev, hdev->vector_status);
+	devm_kfree(&pdev->dev, hdev->vector_irq);
 	pci_free_irq_vectors(pdev);
 }
 
@@ -1728,10 +1931,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
 {
 	int ret = 0;
 
-	/* if this is on going reset then skip this initialization */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		return 0;
-
 	hclgevf_get_misc_vector(hdev);
 
 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2060,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
 	struct hclgevf_hw *hw;
 	int ret;
 
-	/* check if we need to skip initialization of pci. This will happen if
-	 * device is undergoing VF reset. Otherwise, we would need to
-	 * re-initialize pci interface again i.e. when device is not going
-	 * through *any* reset or actually undergoing full reset.
-	 */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		return 0;
-
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2148,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
 	return 0;
 }
 
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret = 0;
+
+	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+		hclgevf_misc_irq_uninit(hdev);
+		hclgevf_uninit_msi(hdev);
+		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+	}
+
+	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+		pci_set_master(pdev);
+		ret = hclgevf_init_msi(hdev);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"failed(%d) to init MSI/MSI-X\n", ret);
+			return ret;
+		}
+
+		ret = hclgevf_misc_irq_init(hdev);
+		if (ret) {
+			hclgevf_uninit_msi(hdev);
+			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+				ret);
+			return ret;
+		}
+
+		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+	}
+
+	return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret;
+
+	ret = hclgevf_pci_reset(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+		return ret;
+	}
+
+	ret = hclgevf_cmd_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "cmd failed %d\n", ret);
+		return ret;
+	}
+
+	ret = hclgevf_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize RSS\n", ret);
+		return ret;
+	}
+
+	ret = hclgevf_config_gro(hdev, true);
+	if (ret)
+		return ret;
+
+	ret = hclgevf_init_vlan_config(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize VLAN config\n", ret);
+		return ret;
+	}
+
+	dev_info(&hdev->pdev->dev, "Reset done\n");
+
+	return 0;
+}
+
 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	int ret;
 
-	/* check if device is on-going full reset(i.e. pcie as well) */
-	if (hclgevf_dev_ongoing_full_reset(hdev)) {
-		dev_warn(&pdev->dev, "device is going full reset\n");
-		hclgevf_uninit_hdev(hdev);
-	}
-
 	ret = hclgevf_pci_init(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "PCI initialization failed\n");
 		return ret;
 	}
 
+	ret = hclgevf_cmd_queue_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+		goto err_cmd_queue_init;
+	}
+
 	ret = hclgevf_cmd_init(hdev);
 	if (ret)
 		goto err_cmd_init;
@@ -1983,16 +2249,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Query vf status error, ret = %d.\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	ret = hclgevf_init_msi(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	hclgevf_state_init(hdev);
+	hdev->reset_level = HNAE3_VF_FUNC_RESET;
 
 	ret = hclgevf_misc_irq_init(hdev);
 	if (ret) {
@@ -2001,6 +2268,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_misc_irq_init;
 	}
 
+	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
 	ret = hclgevf_configure(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2288,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
+	ret = hclgevf_config_gro(hdev, true);
+	if (ret)
+		goto err_config;
+
 	/* Initialize RSS for this VF */
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {
@@ -2034,6 +2307,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 	}
 
+	hdev->last_reset_time = jiffies;
 	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
 
 	return 0;
@@ -2043,25 +2317,31 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 err_misc_irq_init:
 	hclgevf_state_uninit(hdev);
 	hclgevf_uninit_msi(hdev);
-err_query_vf:
-	hclgevf_cmd_uninit(hdev);
 err_cmd_init:
+	hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
 	hclgevf_pci_uninit(hdev);
+	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
 	return ret;
 }
 
 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 {
 	hclgevf_state_uninit(hdev);
-	hclgevf_misc_irq_uninit(hdev);
+
+	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+		hclgevf_misc_irq_uninit(hdev);
+		hclgevf_uninit_msi(hdev);
+		hclgevf_pci_uninit(hdev);
+	}
+
 	hclgevf_cmd_uninit(hdev);
-	hclgevf_uninit_msi(hdev);
-	hclgevf_pci_uninit(hdev);
 }
 
 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
+	struct hclgevf_dev *hdev;
 	int ret;
 
 	ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2351,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	}
 
 	ret = hclgevf_init_hdev(ae_dev->priv);
-	if (ret)
+	if (ret) {
 		dev_err(&pdev->dev, "hclge device initialization failed\n");
+		return ret;
+	}
 
-	return ret;
+	hdev = ae_dev->priv;
+	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+	return 0;
 }
 
 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2437,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 	hdev->hw.mac.duplex = duplex;
 }
 
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_config_gro(hdev, enable);
+}
+
 static void hclgevf_get_media_type(struct hnae3_handle *handle,
 				  u8 *media_type)
 {
@@ -2159,13 +2452,38 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
 		*media_type = hdev->hw.mac.media_type;
 }
 
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->reset_count;
+}
+
 static const struct hnae3_ae_ops hclgevf_ops = {
 	.init_ae_dev = hclgevf_init_ae_dev,
 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
+	.flr_prepare = hclgevf_flr_prepare,
+	.flr_done = hclgevf_flr_done,
 	.init_client_instance = hclgevf_init_client_instance,
 	.uninit_client_instance = hclgevf_uninit_client_instance,
 	.start = hclgevf_ae_start,
 	.stop = hclgevf_ae_stop,
+	.client_start = hclgevf_client_start,
+	.client_stop = hclgevf_client_stop,
 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
 	.get_vector = hclgevf_get_vector,
@@ -2193,11 +2511,17 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.set_vlan_filter = hclgevf_set_vlan_filter,
 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
 	.reset_event = hclgevf_reset_event,
+	.set_default_reset_request = hclgevf_set_def_reset_request,
 	.get_channels = hclgevf_get_channels,
 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
 	.get_status = hclgevf_get_status,
 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
 	.get_media_type = hclgevf_get_media_type,
+	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+	.ae_dev_resetting = hclgevf_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+	.set_gro_en = hclgevf_gro_en,
+	.set_mtu = hclgevf_set_mtu,
 };
 
 static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e..4517b7e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -31,11 +31,19 @@
 #define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
 /* CMDQ register bits for RX event(=MBX event) */
 #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B	2
 
 #define HCLGEVF_TQP_RESET_TRY_TIMES	10
 /* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING		0x20C00
-#define HCLGEVF_FUN_RST_ING_B		0
+#define HCLGEVF_RST_ING			0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT		BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT	BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT	BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT		BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+	(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+	 HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
 
 #define HCLGEVF_RSS_IND_TBL_SIZE		512
 #define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
@@ -54,17 +62,25 @@
 #define HCLGEVF_S_IP_BIT		BIT(3)
 #define HCLGEVF_V_TAG_BIT		BIT(4)
 
+enum hclgevf_evt_cause {
+	HCLGEVF_VECTOR0_EVENT_RST,
+	HCLGEVF_VECTOR0_EVENT_MBX,
+	HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
 /* states of hclgevf device & tasks */
 enum hclgevf_states {
 	/* device states */
 	HCLGEVF_STATE_DOWN,
 	HCLGEVF_STATE_DISABLED,
+	HCLGEVF_STATE_IRQ_INITED,
 	/* task states */
 	HCLGEVF_STATE_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_HANDLING,
 	HCLGEVF_STATE_MBX_SERVICE_SCHED,
 	HCLGEVF_STATE_MBX_HANDLING,
+	HCLGEVF_STATE_CMD_DISABLE,
 };
 
 #define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +161,17 @@ struct hclgevf_dev {
 	struct hclgevf_misc_vector misc_vector;
 	struct hclgevf_rss_cfg rss_cfg;
 	unsigned long state;
+	unsigned long flr_state;
+	unsigned long default_reset_request;
+	unsigned long last_reset_time;
+	enum hnae3_reset_type reset_level;
+	unsigned long reset_pending;
+	enum hnae3_reset_type reset_type;
 
 #define HCLGEVF_RESET_REQUESTED		0
 #define HCLGEVF_RESET_PENDING		1
 	unsigned long reset_state;	/* requested, pending */
+	unsigned long reset_count;	/* the number of resets that have been done */
 	u32 reset_attempts;
 
 	u32 fw_version;
@@ -178,7 +201,9 @@ struct hclgevf_dev {
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
 	struct timer_list service_timer;
+	struct timer_list keep_alive_timer;
 	struct work_struct service_task;
+	struct work_struct keep_alive_task;
 	struct work_struct rst_service_task;
 	struct work_struct mbx_service_task;
 
@@ -192,18 +217,9 @@ struct hclgevf_dev {
 	u32 flag;
 };
 
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
 {
-	return (hdev &&
-		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-		(hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
-{
-	return (hdev &&
-		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+	return !!hdev->reset_pending;
 }
 
 int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f..ef9c8e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
 	}
 
 	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+			return -EIO;
+
 		udelay(HCLGEVF_SLEEP_USCOEND);
 		i++;
 	}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 	crq = &hdev->hw.cmq.crq;
 
 	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+			dev_info(&hdev->pdev->dev, "vf crq needs init\n");
+			return;
+		}
+
 		desc = &crq->desc[crq->next_to_use];
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
+	enum hnae3_reset_type reset_type;
 	u16 link_status;
 	u16 *msg_q;
 	u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 
 	/* process all the async queue messages */
 	while (tail != hdev->arq.head) {
+		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+			dev_info(&hdev->pdev->dev,
+				 "vf crq needs init in async\n");
+			return;
+		}
+
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
 
 		switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			 * has been completely reset. After this stack should
 			 * eventually be re-initialized.
 			 */
-			hdev->nic.reset_level = HNAE3_VF_RESET;
+			reset_type = le16_to_cpu(msg_q[1]);
+			set_bit(reset_type, &hdev->reset_pending);
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 			hclgevf_reset_task_schedule(hdev);
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 097b550..d1a7d25 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -50,6 +50,8 @@ enum hinic_port_cmd {
 
 	HINIC_PORT_CMD_GET_LINK_STATE   = 24,
 
+	HINIC_PORT_CMD_SET_RX_CSUM	= 26,
+
 	HINIC_PORT_CMD_SET_PORT_STATE   = 41,
 
 	HINIC_PORT_CMD_FWCTXT_INIT      = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index f92f1bf..1dfa7eb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -74,12 +74,6 @@
 			((void *)((cmdq_pages)->shadow_page_vaddr) \
 				+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
 
-#define WQE_PAGE_OFF(wq, idx)   (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
-					(wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx)   (((idx) / ((wq)->num_wqebbs_per_page)) \
-					& ((wq)->num_q_pages - 1))
-
 #define WQ_PAGE_ADDR(wq, idx)           \
 			((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
 
@@ -93,6 +87,17 @@
 		(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
 			/ (wq)->max_wqe_size)
 
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+	return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+		<< (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+	return (((idx) >> ((wq)->wqebbs_per_page_shift))
+		& ((wq)->num_q_pages - 1));
+}
 /**
  * queue_alloc_page - allocate page for Queue
  * @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 	struct hinic_hwif *hwif = wqs->hwif;
 	struct pci_dev *pdev = hwif->pdev;
 	u16 num_wqebbs_per_page;
+	u16 wqebb_size_shift;
 	int err;
 
-	if (wqebb_size == 0) {
-		dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+	if (!is_power_of_2(wqebb_size)) {
+		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
 		return -EINVAL;
 	}
 
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 		return -EINVAL;
 	}
 
-	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+	wqebb_size_shift = ilog2(wqebb_size);
+	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+				>> wqebb_size_shift;
 
-	if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+	if (!is_power_of_2(num_wqebbs_per_page)) {
 		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
 		return -EINVAL;
 	}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 	wq->q_depth = q_depth;
 	wq->max_wqe_size = max_wqe_size;
 	wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+	wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+	wq->wqebb_size_shift = wqebb_size_shift;
 	wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
 	wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
 	wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 			 u16 q_depth, u16 max_wqe_size)
 {
 	struct pci_dev *pdev = hwif->pdev;
+	u16 num_wqebbs_per_page_shift;
 	u16 num_wqebbs_per_page;
+	u16 wqebb_size_shift;
 	int i, j, err = -ENOMEM;
 
-	if (wqebb_size == 0) {
-		dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+	if (!is_power_of_2(wqebb_size)) {
+		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
 		return -EINVAL;
 	}
 
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		return -EINVAL;
 	}
 
-	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+	wqebb_size_shift = ilog2(wqebb_size);
+	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+				>> wqebb_size_shift;
 
-	if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+	if (!is_power_of_2(num_wqebbs_per_page)) {
 		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
 		return -EINVAL;
 	}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
 		return err;
 	}
+	num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
 
 	for (i = 0; i < cmdq_blocks; i++) {
 		wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		wq[i].q_depth = q_depth;
 		wq[i].max_wqe_size = max_wqe_size;
 		wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+		wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+		wq[i].wqebb_size_shift = wqebb_size_shift;
 		wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
 		wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
 		wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 
 	*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
 
-	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
 
 	if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
 		atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
  **/
 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 {
-	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+			>> wq->wqebb_size_shift;
 
 	atomic_add(num_wqebbs, &wq->cons_idx);
 
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 				    u16 *cons_idx)
 {
-	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+			>> wq->wqebb_size_shift;
 	u16 curr_cons_idx, end_cons_idx;
 	int curr_pg, end_pg;
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9b66545..0a936cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
 	u16             q_depth;
 	u16             max_wqe_size;
 	u16             num_wqebbs_per_page;
-
+	u16		wqebbs_per_page_shift;
+	u16		wqebb_size_shift;
 	/* The addresses are 64 bit in the HW */
 	u64             block_paddr;
 	void            **shadow_block_vaddr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 9754d6e..1389415 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -170,6 +170,10 @@
 
 #define HINIC_RQ_CQE_STATUS_RXDONE_MASK         0x1
 
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT	0
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK	0xFFFFU
+
 #define HINIC_RQ_CQE_STATUS_GET(val, member)    \
 		(((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
 		 HINIC_RQ_CQE_STATUS_##member##_MASK)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index fdf2bdb..6d48dc6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -600,9 +600,6 @@ static int add_mac_addr(struct net_device *netdev, const u8 *addr)
 	u16 vid = 0;
 	int err;
 
-	if (!is_valid_ether_addr(addr))
-		return -EADDRNOTAVAIL;
-
 	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
 		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 
@@ -726,6 +723,7 @@ static void set_rx_mode(struct work_struct *work)
 {
 	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
 	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+	struct netdev_hw_addr *ha;
 
 	netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
 
@@ -733,6 +731,9 @@ static void set_rx_mode(struct work_struct *work)
 
 	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
 	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+
+	netdev_for_each_mc_addr(ha, nic_dev->netdev)
+		add_mac_addr(nic_dev->netdev, ha->addr);
 }
 
 static void hinic_set_rx_mode(struct net_device *netdev)
@@ -806,7 +807,8 @@ static const struct net_device_ops hinic_netdev_ops = {
 static void netdev_features_init(struct net_device *netdev)
 {
 	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
-			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+			      NETIF_F_RXCSUM;
 
 	netdev->vlan_features = netdev->hw_features;
 
@@ -869,12 +871,16 @@ static int set_features(struct hinic_dev *nic_dev,
 			netdev_features_t features, bool force_change)
 {
 	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 	int err = 0;
 
 	if (changed & NETIF_F_TSO)
 		err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
 					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
 
+	if (changed & NETIF_F_RXCSUM)
+		err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+
 	return err;
 }
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 7575a7d..122c935 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -409,3 +409,33 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
 
 	return 0;
 }
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
+{
+	struct hinic_checksum_offload rx_csum_cfg = {0};
+	struct hinic_hwdev *hwdev = nic_dev->hwdev;
+	struct hinic_hwif *hwif;
+	struct pci_dev *pdev;
+	u16 out_size;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	hwif = hwdev->hwif;
+	pdev = hwif->pdev;
+	rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+	rx_csum_cfg.rx_csum_offload = en;
+
+	err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+				 &rx_csum_cfg, sizeof(rx_csum_cfg),
+				 &rx_csum_cfg, &out_size);
+	if (err || !out_size || rx_csum_cfg.status) {
+		dev_err(&pdev->dev,
+			"Failed to set rx csum offload, status = %d\n",
+			rx_csum_cfg.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index f6e3220..02d896e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -183,6 +183,15 @@ struct hinic_tso_config {
 	u8	resv2[3];
 };
 
+struct hinic_checksum_offload {
+	u8	status;
+	u8	version;
+	u8	rsvd0[6];
+
+	u16	func_id;
+	u16	rsvd1;
+	u32	rx_csum_offload;
+};
 int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
 		       u16 vlan_id);
 
@@ -213,4 +222,5 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
 
 int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
 
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
 #endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 4c0f7ed..f86f2e6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -89,6 +89,28 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
 	hinic_rxq_clean_stats(rxq);
 }
 
+static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+		    struct sk_buff *skb)
+{
+	struct net_device *netdev = rxq->netdev;
+	struct hinic_rq_cqe *cqe;
+	struct hinic_rq *rq;
+	u32 csum_err;
+	u32 status;
+
+	rq = rxq->rq;
+	cqe = rq->cqe[cons_idx];
+	status = be32_to_cpu(cqe->status);
+	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+
+	if (!(netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	if (!csum_err)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb->ip_summed = CHECKSUM_NONE;
+}
 /**
  * rx_alloc_skb - allocate skb and map it to dma address
  * @rxq: rx queue
@@ -207,9 +229,9 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq)
 		wmb();  /* write all the wqes before update PI */
 
 		hinic_rq_update(rxq->rq, prod_idx);
+		tasklet_schedule(&rxq->rx_task);
 	}
 
-	tasklet_schedule(&rxq->rx_task);
 	return i;
 }
 
@@ -328,6 +350,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
 
 		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
 
+		rx_csum(rxq, ci, skb);
+
 		prefetch(skb->data);
 
 		pkt_len = sge.len;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 27c9af4..ab3faba 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -23,6 +23,10 @@
 
 #include "hinic_hw_qp.h"
 
+#define HINIC_RX_CSUM_OFFLOAD_EN	0xFFF
+#define HINIC_RX_CSUM_HW_CHECK_NONE	BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR	BIT(8)
+
 struct hinic_rxq_stats {
 	u64                     pkts;
 	u64                     bytes;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 760b2ad..2092554 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev)
 	dev->phy.duplex = phy->duplex;
 	dev->phy.pause = phy->pause;
 	dev->phy.asym_pause = phy->asym_pause;
-	dev->phy.advertising = phy->advertising;
+	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
+						phy->advertising);
 }
 
 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
@@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
 	phy_dev->autoneg = phy->autoneg;
 	phy_dev->speed = phy->speed;
 	phy_dev->duplex = phy->duplex;
-	phy_dev->advertising = phy->advertising;
+	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
+						phy->advertising);
 	return phy_start_aneg(phy_dev);
 }
 
@@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev,
 	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
 	dev->phy.def->name = dev->phy_dev->drv->name;
 	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
-	dev->phy.features = dev->phy_dev->supported;
+	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
+						dev->phy_dev->supported);
 	dev->phy.address = dev->phy_dev->mdio.addr;
 	dev->phy.mode = dev->phy_dev->interface;
 	return 0;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 7c4b554..5e5c57d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget)
 	e100_rx_clean(nic, &work_done, budget);
 	e100_tx_clean(nic);
 
-	/* If budget not fully consumed, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete_done(napi, work_done);
+	/* If budget fully consumed, continue polling */
+	if (work_done == budget)
+		return budget;
+
+	/* only re-enable interrupt if stack agrees polling is really done */
+	if (likely(napi_complete_done(napi, work_done)))
 		e100_enable_irq(nic);
-	}
 
 	return work_done;
 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 43b6d3c..8fe9af0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 
 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
 
-	if (!tx_clean_complete)
-		work_done = budget;
+	if (!tx_clean_complete || work_done == budget)
+		return budget;
 
-	/* If budget not fully consumed, exit the polling mode */
-	if (work_done < budget) {
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done))) {
 		if (likely(adapter->itr_setting & 3))
 			e1000_set_itr(adapter);
-		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->flags))
 			e1000_irq_enable(adapter);
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc7..be13227 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info;
 void e1000e_ptp_init(struct e1000_adapter *adapter);
 void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+		       struct ptp_system_timestamp *sts);
+
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
 	return hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 16a73bd..308c006 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2651,9 +2651,9 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
 /**
  * e1000e_poll - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
- * @weight: number of packets driver is allowed to process this poll
+ * @budget: number of packets driver is allowed to process this poll
  **/
-static int e1000e_poll(struct napi_struct *napi, int weight)
+static int e1000e_poll(struct napi_struct *napi, int budget)
 {
 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
 						     napi);
@@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
 	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
 		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
 
-	adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+	adapter->clean_rx(adapter->rx_ring, &work_done, budget);
 
-	if (!tx_cleaned)
-		work_done = weight;
+	if (!tx_cleaned || work_done == budget)
+		return budget;
 
-	/* If weight not fully consumed, exit the polling mode */
-	if (work_done < weight) {
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done))) {
 		if (adapter->itr_setting & 3)
 			e1000_set_itr(adapter);
-		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
 				ew32(IMS, adapter->rx_ring->ims_val);
@@ -4319,13 +4320,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 /**
  * e1000e_sanitize_systim - sanitize raw cycle counter reads
  * @hw: pointer to the HW structure
- * @systim: time value read, sanitized and returned
+ * @systim: PHC time value read, sanitized and returned
+ * @sts: structure to hold system time before and after reading SYSTIML,
+ * may be NULL
  *
  * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
  * check to see that the time is incrementing at a reasonable
  * rate and is a multiple of incvalue.
  **/
-static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
+				  struct ptp_system_timestamp *sts)
 {
 	u64 time_delta, rem, temp;
 	u64 systim_next;
@@ -4335,7 +4339,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
 	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
 	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
 		/* latch SYSTIMH on read of SYSTIML */
+		ptp_read_system_prets(sts);
 		systim_next = (u64)er32(SYSTIML);
+		ptp_read_system_postts(sts);
 		systim_next |= (u64)er32(SYSTIMH) << 32;
 
 		time_delta = systim_next - systim;
@@ -4353,15 +4359,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
 }
 
 /**
- * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
- * @cc: cyclecounter structure
+ * e1000e_read_systim - read SYSTIM register
+ * @adapter: board private structure
+ * @sts: structure which will contain system time before and after reading
+ * SYSTIML, may be NULL
  **/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+		       struct ptp_system_timestamp *sts)
 {
-	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
-						     cc);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 systimel, systimeh;
+	u32 systimel, systimel_2, systimeh;
 	u64 systim;
 	/* SYSTIMH latching upon SYSTIML read does not work well.
 	 * This means that if SYSTIML overflows after we read it but before
@@ -4369,11 +4376,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	 * will experience a huge non linear increment in the systime value
 	 * to fix that we test for overflow and if true, we re-read systime.
 	 */
+	ptp_read_system_prets(sts);
 	systimel = er32(SYSTIML);
+	ptp_read_system_postts(sts);
 	systimeh = er32(SYSTIMH);
 	/* Is systimel so large that overflow is possible? */
 	if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
-		u32 systimel_2 = er32(SYSTIML);
+		ptp_read_system_prets(sts);
+		systimel_2 = er32(SYSTIML);
+		ptp_read_system_postts(sts);
 		if (systimel > systimel_2) {
 			/* There was an overflow, read again SYSTIMH, and use
 			 * systimel_2
@@ -4386,12 +4397,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	systim |= (u64)systimeh << 32;
 
 	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
-		systim = e1000e_sanitize_systim(hw, systim);
+		systim = e1000e_sanitize_systim(hw, systim, sts);
 
 	return systim;
 }
 
 /**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+						     cc);
+
+	return e1000e_read_systim(adapter, NULL);
+}
+
+/**
  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
  * @adapter: board private structure to initialize
  *
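
The rework above keeps the existing wrap guard around the split SYSTIML/SYSTIMH read while threading the optional ptp_system_timestamp through it. The guard itself is a generic pattern for 64-bit counters exposed as two 32-bit halves; a compact sketch, with rd_lo()/rd_hi() and NEAR_WRAP standing in for the real register reads and threshold:

/* Wrap-safe read of a 64-bit counter split across two 32-bit registers.
 * rd_lo(), rd_hi() and NEAR_WRAP are placeholders for the device-specific
 * accessors and the "low half may overflow before the high half is read"
 * threshold.
 */
static u64 example_read_split_counter(void)
{
	u32 lo, lo2, hi;

	lo = rd_lo();
	hi = rd_hi();
	if (lo >= NEAR_WRAP) {		/* low half close to wrapping */
		lo2 = rd_lo();
		if (lo > lo2) {		/* it wrapped: hi read above is stale */
			hi = rd_hi();
			lo = lo2;
		}
	}
	return ((u64)hi << 32) | lo;
}
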
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c7694..1a4c65d 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
 #endif/*CONFIG_E1000E_HWTS*/
 
 /**
- * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * e1000e_phc_gettimex - Reads the current time from the hardware clock and
+ *                       system clock
  * @ptp: ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec structure to hold the current PHC time
+ * @sts: structure to hold the current system time
  *
  * Read the timecounter and return the correct value in ns after converting
  * it into a struct timespec.
  **/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int e1000e_phc_gettimex(struct ptp_clock_info *ptp,
+			       struct timespec64 *ts,
+			       struct ptp_system_timestamp *sts)
 {
 	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
 						     ptp_clock_info);
 	unsigned long flags;
-	u64 ns;
+	u64 cycles, ns;
 
 	spin_lock_irqsave(&adapter->systim_lock, flags);
-	ns = timecounter_read(&adapter->tc);
+
+	/* NOTE: Non-monotonic SYSTIM readings may be returned */
+	cycles = e1000e_read_systim(adapter, sts);
+	ns = timecounter_cyc2time(&adapter->tc, cycles);
+
 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
 	*ts = ns_to_timespec64(ns);
@@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
 						     systim_overflow_work.work);
 	struct e1000_hw *hw = &adapter->hw;
 	struct timespec64 ts;
+	u64 ns;
 
-	adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+	/* Update the timecounter */
+	ns = timecounter_read(&adapter->tc);
 
+	ts = ns_to_timespec64(ns);
 	e_dbg("SYSTIM overflow check at %lld.%09lu\n",
 	      (long long) ts.tv_sec, ts.tv_nsec);
 
@@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
 	.pps		= 0,
 	.adjfreq	= e1000e_phc_adjfreq,
 	.adjtime	= e1000e_phc_adjtime,
-	.gettime64	= e1000e_phc_gettime,
+	.gettimex64	= e1000e_phc_gettimex,
 	.settime64	= e1000e_phc_settime,
 	.enable		= e1000e_phc_enable,
 };
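
With the hunks above, e1000e implements the new gettimex64 callback: the exact SYSTIML register read is bracketed by ptp_read_system_prets()/ptp_read_system_postts() so the PTP core can report a system-time window around the PHC sample. A minimal sketch of that provider shape under assumed names; my_clock, my_read_counter() and the lock are illustrative, not e1000e code. The callback is wired up through the .gettimex64 member of ptp_clock_info, as the e1000e hunk does.

#include <linux/ptp_clock_kernel.h>
#include <linux/spinlock.h>
#include <linux/timecounter.h>

struct my_clock {
	struct ptp_clock_info info;
	struct timecounter tc;
	spinlock_t lock;
};

static u64 my_read_counter(struct my_clock *clk,
			   struct ptp_system_timestamp *sts)
{
	u64 cycles;

	ptp_read_system_prets(sts);	/* system time just before the read */
	cycles = 0;			/* read the low counter register here */
	ptp_read_system_postts(sts);	/* system time just after the read */
	/* read and merge the high half here */
	return cycles;
}

static int my_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
			 struct ptp_system_timestamp *sts)
{
	struct my_clock *clk = container_of(info, struct my_clock, info);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&clk->lock, flags);
	ns = timecounter_cyc2time(&clk->tc, my_read_counter(clk, sts));
	spin_unlock_irqrestore(&clk->lock, flags);

	*ts = ns_to_timespec64(ns);
	return 0;
}
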
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5b2a50e..6fd15a7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-
-	/* re-enable the q_vector */
-	fm10k_qv_enable(q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		fm10k_qv_enable(q_vector);
 
 	return min(work_done, budget - 1);
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 876cac3..8de9085 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
 	__I40E_MDD_EVENT_PENDING,
 	__I40E_VFLR_EVENT_PENDING,
 	__I40E_RESET_RECOVERY_PENDING,
+	__I40E_TIMEOUT_RECOVERY_PENDING,
 	__I40E_MISC_IRQ_REQUESTED,
 	__I40E_RESET_INTR_RECEIVED,
 	__I40E_REINIT_REQUESTED,
@@ -146,6 +147,7 @@ enum i40e_state_t {
 	__I40E_CLIENT_SERVICE_REQUESTED,
 	__I40E_CLIENT_L2_CHANGE,
 	__I40E_CLIENT_RESET,
+	__I40E_VIRTCHNL_OP_PENDING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_STATE_SIZE__,
 };
@@ -494,7 +496,6 @@ struct i40e_pf {
 #define I40E_HW_STOP_FW_LLDP			BIT(16)
 #define I40E_HW_PORT_ID_VALID			BIT(17)
 #define I40E_HW_RESTART_AUTONEG			BIT(18)
-#define I40E_HW_STOPPABLE_FW_LLDP		BIT(19)
 
 	u32 flags;
 #define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 501ee71..7ab61f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
 	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
 		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+	}
+	if (hw->mac.type == I40E_MAC_X722 &&
+	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
 	}
 
 	/* Newer versions of firmware require lock when reading the NVM */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec..1150610 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,7 +11,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR_X722	0x0005
+#define I40E_FW_API_VERSION_MINOR_X722	0x0006
 #define I40E_FW_API_VERSION_MINOR_X710	0x0007
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -20,6 +20,8 @@
 
 /* API version 1.7 implements additional link and PHY-specific APIs  */
 #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
 
 struct i40e_aq_desc {
 	__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 85f75b5..97a9b1f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
 	i40e_status status;
 
+	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_dcb_parameters);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9f8464f..a6bc784 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 		ks->base.speed = SPEED_100;
 		break;
 	default:
+		ks->base.speed = SPEED_UNKNOWN;
 		break;
 	}
 	ks->base.duplex = DUPLEX_FULL;
@@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	i40e_status status;
 	u8 aq_failures;
 	int err = 0;
+	u32 is_an;
 
 	/* Changing the port's flow control is not supported if this isn't the
 	 * port's controlling PF
@@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	if (vsi != pf->vsi[pf->lan_vsi])
 		return -EOPNOTSUPP;
 
-	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
-	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+	is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+	if (pause->autoneg != is_an) {
 		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
 		return -EOPNOTSUPP;
 	}
 
 	/* If we have link and don't have autoneg */
-	if (!test_bit(__I40E_DOWN, pf->state) &&
-	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+	if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
 		/* Send message that it might not necessarily work */
 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
 	}
@@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 		err = -EAGAIN;
 	}
 
-	if (!test_bit(__I40E_DOWN, pf->state)) {
+	if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
 		/* Give it a little more time to try to come back */
 		msleep(75);
 		if (!test_bit(__I40E_DOWN, pf->state))
@@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */
-	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
+			  | (wol->wolopts != WAKE_FILTER))
 		return -EOPNOTSUPP;
 
 	/* is this a new value? */
@@ -4659,14 +4661,15 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 		return -EOPNOTSUPP;
 
 	/* If the driver detected FW LLDP was disabled on init, this flag could
-	 * be set, however we do not support _changing_ the flag if NPAR is
-	 * enabled or FW API version < 1.7.  There are situations where older
-	 * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
-	 * not allow the user to enable/disable LLDP with this flag on
-	 * unsupported FW versions.
+	 * be set, however we do not support _changing_ the flag:
+	 * - on XL710 if NPAR is enabled or FW API version < 1.7
+	 * - on X722 with FW API version < 1.6
+	 * There are situations where older FW versions/NPAR enabled PFs could
+	 * disable LLDP; however, we _must_ not allow the user to enable/disable
+	 * LLDP with this flag on unsupported FW versions.
 	 */
 	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
-		if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
+		if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
 			dev_warn(&pf->pdev->dev,
 				 "Device does not support changing FW LLDP\n");
 			return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 21c2688..47f0fda 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_MINOR 7
+#define DRV_VERSION_BUILD 6
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
 		return;   /* don't do any new action before the next timeout */
 
+	/* don't kick off another recovery if one is already pending */
+	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+		return;
+
 	if (tx_ring) {
 		head = i40e_get_head(tx_ring);
 		/* Read interrupt register */
@@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
 	bool found = false;
 	int bkt;
 
-	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
-	     "Missing mac_filter_hash_lock\n");
+	lockdep_assert_held(&vsi->mac_filter_hash_lock);
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		if (ether_addr_equal(macaddr, f->macaddr)) {
 			__i40e_del_filter(vsi, f);
@@ -9632,6 +9635,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 	clear_bit(__I40E_RESET_FAILED, pf->state);
 clear_recovery:
 	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
 }
 
 /**
@@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		/* IWARP needs one extra vector for CQP just like MISC.*/
 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
 	}
-	/* Stopping the FW LLDP engine is only supported on the
-	 * XL710 with a FW ver >= 1.7.  Also, stopping FW LLDP
-	 * engine is not supported if NPAR is functioning on this
-	 * part
+	/* Stopping FW LLDP engine is supported on XL710 and X722
+	 * starting from FW versions determined in i40e_init_adminq.
+	 * Stopping the FW LLDP engine is not supported on XL710
+	 * if NPAR is functioning, so unset this hw flag in that case.
 	 */
 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
-	    !pf->hw.func_caps.npar_enable &&
-	    (pf->hw.aq.api_maj_ver > 1 ||
-	     (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
-		pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
+	    pf->hw.func_caps.npar_enable &&
+	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
 
 #ifdef CONFIG_PCI_IOV
 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
@@ -14302,23 +14305,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		switch (hw->bus.speed) {
 		case i40e_bus_speed_8000:
-			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_5000:
-			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_2500:
-			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
 		default:
 			break;
 		}
 		switch (hw->bus.width) {
 		case i40e_bus_width_pcie_x8:
-			strncpy(width, "8", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x4:
-			strncpy(width, "4", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x2:
-			strncpy(width, "2", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x1:
-			strncpy(width, "1", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
 		default:
 			break;
 		}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 1199f05..e6fc0af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -694,7 +694,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
 	if (!IS_ERR_OR_NULL(pf->ptp_clock))
 		return 0;
 
-	strncpy(pf->ptp_caps.name, i40e_driver_name,
+	strlcpy(pf->ptp_caps.name, i40e_driver_name,
 		sizeof(pf->ptp_caps.name) - 1);
 	pf->ptp_caps.owner = THIS_MODULE;
 	pf->ptp_caps.max_adj = 999999999;
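
The strncpy()-to-strlcpy() conversions above (the bus speed/width strings and the PTP clock name) swap a copy that can leave the destination unterminated for one that always NUL-terminates and returns the source length. A tiny sketch of the behaviour, with an assumed 4-byte buffer:

#include <linux/string.h>

/* Assumed example: "8.0" fits; a longer source would be truncated but
 * still NUL-terminated by strlcpy(), whereas a size-filling strncpy()
 * would leave no terminator at all.
 */
static void example_copy(char dst[4])
{
	strlcpy(dst, "8.0", 4);		/* dst = "8.0\0" */
}
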
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index aef3c89..a0b1575 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2667,10 +2667,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
 		q_vector->arm_wb_state = false;
 
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-
-	i40e_update_enable_itr(vsi, q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		i40e_update_enable_itr(vsi, q_vector);
 
 	return min(work_done, budget - 1);
 }
@@ -3473,6 +3474,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_desc->cmd_type_offset_bsz =
 			build_ctob(td_cmd, td_offset, size, td_tag);
 
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.
 	 *
@@ -3526,6 +3529,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 	u16 i = xdp_ring->next_to_use;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
+	void *data = xdpf->data;
 	u32 size = xdpf->len;
 	dma_addr_t dma;
 
@@ -3533,8 +3537,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 		xdp_ring->tx_stats.tx_busy++;
 		return I40E_XDP_CONSUMED;
 	}
-
-	dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
+	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(xdp_ring->dev, dma))
 		return I40E_XDP_CONSUMED;
 
@@ -3652,8 +3655,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
-	skb_tx_timestamp(skb);
-
 	/* always enable CRC insertion offload */
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7df969c..2781ab9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -615,6 +615,7 @@ struct i40e_hw {
 #define I40E_HW_FLAG_802_1AD_CAPABLE        BIT_ULL(1)
 #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE  BIT_ULL(2)
 #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE      BIT_ULL(4)
 	u64 flags;
 
 	/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index ac5698e..2ac23eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
 		return I40E_ERR_PARAM;
 
-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+	    (allmulti || alluni)) {
 		dev_err(&pf->pdev->dev,
 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
 			vf->vf_id);
@@ -1675,13 +1676,20 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
 
 	if (num_vfs) {
 		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
 		}
-		return i40e_pci_sriov_enable(pdev, num_vfs);
+		ret = i40e_pci_sriov_enable(pdev, num_vfs);
+		goto sriov_configure_out;
 	}
 
 	if (!pci_vfs_assigned(pf->pdev)) {
@@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
 	} else {
 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto sriov_configure_out;
 	}
-	return 0;
+sriov_configure_out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
 
 /***********************virtual channel routines******************/
@@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 		goto error_param;
 	}
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	if (is_multicast_ether_addr(mac)) {
 		dev_err(&pf->pdev->dev,
 			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -3941,6 +3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
 
 error_param:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	struct i40e_vf *vf;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	ret = i40e_validate_vf(pf, vf_id);
 	if (ret)
@@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	ret = 0;
 
 error_pvid:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 	struct i40e_vf *vf;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	ret = i40e_validate_vf(pf, vf_id);
 	if (ret)
@@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 
 	vf->tx_rate = max_tx_rate;
 error:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 	struct i40e_vf *vf;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	ret = i40e_validate_vf(pf, vf_id);
 	if (ret)
@@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 	ret = 0;
 
 error_param:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 	int abs_vf_id;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	if (vf_id >= pf->num_alloc_vfs) {
 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
 
 error_out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 	struct i40e_vf *vf;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	if (vf_id >= pf->num_alloc_vfs) {
 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 		ret = -EIO;
 	}
 out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
@@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
 	struct i40e_vf *vf;
 	int ret = 0;
 
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
 	/* validate the request */
 	if (vf_id >= pf->num_alloc_vfs) {
 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	vf = &pf->vf[vf_id];
@@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
 	}
 
 out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
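
The virtchnl changes above serialize the VF-facing ndo callbacks and sriov_configure against each other by treating __I40E_VIRTCHNL_OP_PENDING as an ownership bit: test_and_set_bit() either claims it or returns -EAGAIN, and every exit path clears it again. A minimal sketch of that guard; the bit name, bitmap and operation body are placeholders.

#include <linux/bitops.h>
#include <linux/errno.h>

enum { MY_OP_PENDING, MY_STATE_SIZE };		/* placeholder state bits */

static int my_guarded_op(unsigned long *state)
{
	int ret = 0;

	if (test_and_set_bit(MY_OP_PENDING, state))
		return -EAGAIN;		/* another operation is in flight */

	/* ... perform the actual reconfiguration here ... */

	clear_bit(MY_OP_PENDING, state);	/* release on every exit path */
	return ret;
}
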
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bf67d62..f962102 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -13,9 +13,9 @@
 #define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED	3
 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED	10
 
-#define I40E_VLAN_PRIORITY_SHIFT	12
+#define I40E_VLAN_PRIORITY_SHIFT	13
 #define I40E_VLAN_MASK			0xFFF
-#define I40E_PRIORITY_MASK		0x7000
+#define I40E_PRIORITY_MASK		0xE000
 
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
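
The shift/mask fix above lines the driver up with the 802.1Q TCI layout: the 3-bit PCP (priority) occupies bits 15:13 (mask 0xE000), the DEI/CFI bit is bit 12, and the 12-bit VLAN ID sits in bits 11:0 (mask 0xFFF), so extracting the priority with shift 12 and mask 0x7000 grabs bits 14:12 and folds in the DEI bit. A small worked example of pulling both fields out of a TCI value (standalone, names prefixed EX_ to mark them as illustrative):

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI: | PCP (3 bits) | DEI (1 bit) | VID (12 bits) | */
#define EX_VLAN_PRIORITY_SHIFT	13
#define EX_VLAN_PRIORITY_MASK	0xE000
#define EX_VLAN_VID_MASK	0x0FFF

int main(void)
{
	uint16_t tci = 0xA064;	/* PCP = 5, DEI = 0, VID = 100 */

	printf("prio %u vid %u\n",
	       (tci & EX_VLAN_PRIORITY_MASK) >> EX_VLAN_PRIORITY_SHIFT,
	       tci & EX_VLAN_VID_MASK);	/* prints "prio 5 vid 100" */
	return 0;
}
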
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb9bfad..9b4d7ce 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1761,10 +1761,11 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
 	if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
 		q_vector->arm_wb_state = false;
 
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-
-	iavf_update_enable_itr(vsi, q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		iavf_update_enable_itr(vsi, q_vector);
 
 	return min(work_done, budget - 1);
 }
@@ -2343,6 +2344,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
 	tx_desc->cmd_type_offset_bsz =
 			build_ctob(td_cmd, td_offset, size, td_tag);
 
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.
 	 *
@@ -2461,8 +2464,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
 	if (tso < 0)
 		goto out_drop;
 
-	skb_tx_timestamp(skb);
-
 	/* always enable CRC insertion offload */
 	td_cmd |= IAVF_TX_DESC_CMD_ICRC;
 
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b854837..a385575 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
 #define ICE_MBXQ_LEN		64
 #define ICE_MIN_MSIX		2
 #define ICE_NO_VSI		0xffff
-#define ICE_MAX_VSI_ALLOC	130
 #define ICE_MAX_TXQS		2048
 #define ICE_MAX_RXQS		2048
 #define ICE_VSI_MAP_CONTIG	0
@@ -97,14 +96,14 @@ extern const char ice_drv_ver[];
 #define ice_for_each_vsi(pf, i) \
 	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
 
-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
 #define ice_for_each_txq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
 
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
 #define ice_for_each_alloc_txq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
 
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
 
 struct ice_tc_info {
 	u16 qoffset;
-	u16 qcount;
+	u16 qcount_tx;
+	u16 qcount_rx;
+	u8 netdev_tc;
 };
 
 struct ice_tc_cfg {
@@ -149,10 +150,10 @@ enum ice_state {
 	__ICE_RESET_FAILED,		/* set by reset/rebuild */
 	/* When checking for the PF to be in a nominal operating state, the
 	 * bits that are grouped at the beginning of the list need to be
-	 * checked.  Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
-	 * be checked.  If you need to add a bit into consideration for nominal
+	 * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+	 * be checked. If you need to add a bit into consideration for nominal
 	 * operating state, it must be added before
-	 * __ICE_STATE_NOMINAL_CHECK_BITS.  Do not move this entry's position
+	 * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
 	 * without appropriate consideration.
 	 */
 	__ICE_STATE_NOMINAL_CHECK_BITS,
@@ -182,8 +183,8 @@ struct ice_vsi {
 	struct ice_sw *vsw;		 /* switch this VSI is on */
 	struct ice_pf *back;		 /* back pointer to PF */
 	struct ice_port_info *port_info; /* back pointer to port_info */
-	struct ice_ring **rx_rings;	 /* rx ring array */
-	struct ice_ring **tx_rings;	 /* tx ring array */
+	struct ice_ring **rx_rings;	 /* Rx ring array */
+	struct ice_ring **tx_rings;	 /* Tx ring array */
 	struct ice_q_vector **q_vectors; /* q_vector array */
 
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -200,8 +201,8 @@ struct ice_vsi {
 	int sw_base_vector;		/* Irq base for OS reserved vectors */
 	int hw_base_vector;		/* HW (absolute) index of a vector */
 	enum ice_vsi_type type;
-	u16 vsi_num;			 /* HW (absolute) index of this VSI */
-	u16 idx;			 /* software index in pf->vsi[] */
+	u16 vsi_num;			/* HW (absolute) index of this VSI */
+	u16 idx;			/* software index in pf->vsi[] */
 
 	/* Interrupt thresholds */
 	u16 work_lmt;
@@ -254,8 +255,8 @@ struct ice_q_vector {
 	struct ice_ring_container tx;
 	struct irq_affinity_notify affinity_notify;
 	u16 v_idx;			/* index in the vsi->q_vector array. */
-	u8 num_ring_tx;			/* total number of tx rings in vector */
-	u8 num_ring_rx;			/* total number of rx rings in vector */
+	u8 num_ring_tx;			/* total number of Tx rings in vector */
+	u8 num_ring_rx;			/* total number of Rx rings in vector */
 	char name[ICE_INT_NAME_STR_LEN];
 	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
 	 * value to the device
@@ -307,10 +308,10 @@ struct ice_pf {
 	u32 hw_oicr_idx;	/* Other interrupt cause vector HW index */
 	u32 num_avail_hw_msix;	/* remaining HW MSIX vectors left unclaimed */
 	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
-	u16 num_lan_tx;		/* num lan tx queues setup */
-	u16 num_lan_rx;		/* num lan rx queues setup */
-	u16 q_left_tx;		/* remaining num tx queues left unclaimed */
-	u16 q_left_rx;		/* remaining num rx queues left unclaimed */
+	u16 num_lan_tx;		/* num lan Tx queues setup */
+	u16 num_lan_rx;		/* num lan Rx queues setup */
+	u16 q_left_tx;		/* remaining num Tx queues left unclaimed */
+	u16 q_left_rx;		/* remaining num Rx queues left unclaimed */
 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
 	u16 num_alloc_vsi;
 	u16 corer_count;	/* Core reset count */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555..fcdcd80 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -5,7 +5,7 @@
 #define _ICE_ADMINQ_CMD_H_
 
 /* This header file defines the Admin Queue commands, error codes and
- * descriptor format.  It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
  */
 
 #define ICE_MAX_VSI			768
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
 /* Device/Function buffer entry, repeated per reported capability */
 struct ice_aqc_list_caps_elem {
 	__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS			0x0005
 #define ICE_AQC_CAPS_SRIOV				0x0012
 #define ICE_AQC_CAPS_VF					0x0013
 #define ICE_AQC_CAPS_VSI				0x0017
@@ -462,7 +463,7 @@ struct ice_aqc_sw_rules {
 };
 
 /* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions.  "index"
+ * This structure describes the lookup rules and associated actions. "index"
  * is returned as part of a response to a successful Add command, and can be
  * used to identify the rule for Update/Get/Remove commands.
  */
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
 #define ICE_AQC_NVM_LAST_CMD		BIT(0)
 #define ICE_AQC_NVM_PCIR_REQ		BIT(0)	/* Used by NVM Update reply */
 #define ICE_AQC_NVM_PRESERVATION_S	1
-#define ICE_AQC_NVM_PRESERVATION_M	(3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION	(0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M	(3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION	(0 << ICE_AQC_NVM_PRESERVATION_S)
 #define ICE_AQC_NVM_PRESERVE_ALL	BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << ICE_AQC_NVM_PRESERVATION_S)
 #define ICE_AQC_NVM_FLASH_ONLY		BIT(7)
 	__le16 module_typeid;
 	__le16 length;
@@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
 };
 
 /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct  ice_aqc_get_set_rss_lut {
+struct ice_aqc_get_set_rss_lut {
 #define ICE_AQC_GSET_RSS_LUT_VSI_VALID	BIT(15)
 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S	0
 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_M	(0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
@@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
  * @params: command-specific parameters
  *
  * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ).  The firmware writes back onto the command descriptor and returns
- * the result of the command.  Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
  * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format.  Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
  * 32-bit words.
  */
 struct ice_aq_desc {
@@ -1379,10 +1380,10 @@ struct ice_aq_desc {
 
 /* error codes */
 enum ice_aq_err {
-	ICE_AQ_RC_OK		= 0,  /* success */
+	ICE_AQ_RC_OK		= 0,  /* Success */
 	ICE_AQ_RC_ENOMEM	= 9,  /* Out of memory */
 	ICE_AQ_RC_EBUSY		= 12, /* Device or resource busy */
-	ICE_AQ_RC_EEXIST	= 13, /* object already exists */
+	ICE_AQ_RC_EEXIST	= 13, /* Object already exists */
 	ICE_AQ_RC_ENOSPC	= 16, /* No space left or allocation failure */
 };
 
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd70..4c1d35d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 
-	ice_init_def_sw_recp(hw);
-
-	return 0;
+	return ice_init_def_sw_recp(hw);
 }
 
 /**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 
 	hw->evb_veb = true;
 
-	/* Query the allocated resources for tx scheduler */
+	/* Query the allocated resources for Tx scheduler */
 	status = ice_sched_query_res_alloc(hw);
 	if (status) {
 		ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  * ice_copy_rxq_ctx_to_hw
  * @hw: pointer to the hardware structure
  * @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Copies rxq context from dense structure to hw register space
  */
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * ice_write_rxq_ctx
  * @hw: pointer to the hardware structure
  * @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Converts rxq context from sparse to dense structure and then writes
  * it to hw register space
@@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
 }
 
 /**
+ * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSI per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+	u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M	0xFF
+	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+			 ICE_CAPS_VALID_FUNCS_M);
+
+	if (!funcs)
+		return 0;
+
+	return ICE_MAX_VSI / funcs;
+}
+
+/**
  * ice_parse_caps - parse function/device capabilities
  * @hw: pointer to the hw struct
  * @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 		u16 cap = le16_to_cpu(cap_resp->cap);
 
 		switch (cap) {
+		case ICE_AQC_CAPS_VALID_FUNCTIONS:
+			caps->valid_functions = number;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Valid Functions = %d\n",
+				  caps->valid_functions);
+			break;
 		case ICE_AQC_CAPS_SRIOV:
 			caps->sr_iov_1_1 = (number == 1);
 			ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 					  "HW caps: Dev.VSI cnt = %d\n",
 					  dev_p->num_vsi_allocd_to_host);
 			} else if (func_p) {
-				func_p->guaranteed_num_vsi = number;
+				func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
 				ice_debug(hw, ICE_DBG_INIT,
 					  "HW caps: Func.VSI cnt = %d\n",
-					  func_p->guaranteed_num_vsi);
+					  number);
 			}
 			break;
 		case ICE_AQC_CAPS_RSS:
@@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  */
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
 {
 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
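
ice_get_guar_num_vsi() above splits the device-wide VSI budget evenly across the PFs reported as valid: hweight8() counts the set bits in the low byte of the valid_functions capability, and ICE_MAX_VSI (768, per ice_adminq_cmd.h in this series) is divided by that count, with a zero-function guard. A short worked example of the arithmetic under an assumed two-port device:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_MAX_VSI	768	/* mirrors ICE_MAX_VSI above */

/* Same arithmetic as ice_get_guar_num_vsi(), with an assumed input:
 * valid_functions = 0x03 -> hweight8() = 2 PFs -> 768 / 2 = 384 VSIs each.
 */
static u32 example_guar_num_vsi(u8 valid_functions)
{
	u8 funcs = hweight8(valid_functions & 0xFF);

	return funcs ? EXAMPLE_MAX_VSI / funcs : 0;
}
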
 
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 84c9672..2bf5e11 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -3,6 +3,26 @@
 
 #include "ice_common.h"
 
+#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
+do {								\
+	(qinfo)->sq.head = prefix##_ATQH;			\
+	(qinfo)->sq.tail = prefix##_ATQT;			\
+	(qinfo)->sq.len = prefix##_ATQLEN;			\
+	(qinfo)->sq.bah = prefix##_ATQBAH;			\
+	(qinfo)->sq.bal = prefix##_ATQBAL;			\
+	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
+	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
+	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
+	(qinfo)->rq.head = prefix##_ARQH;			\
+	(qinfo)->rq.tail = prefix##_ARQT;			\
+	(qinfo)->rq.len = prefix##_ARQLEN;			\
+	(qinfo)->rq.bah = prefix##_ARQBAH;			\
+	(qinfo)->rq.bal = prefix##_ARQBAL;			\
+	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
+	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
+	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
+} while (0)
+
 /**
  * ice_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
 {
 	struct ice_ctl_q_info *cq = &hw->adminq;
 
-	cq->sq.head = PF_FW_ATQH;
-	cq->sq.tail = PF_FW_ATQT;
-	cq->sq.len = PF_FW_ATQLEN;
-	cq->sq.bah = PF_FW_ATQBAH;
-	cq->sq.bal = PF_FW_ATQBAL;
-	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
-	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
-	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
-
-	cq->rq.head = PF_FW_ARQH;
-	cq->rq.tail = PF_FW_ARQT;
-	cq->rq.len = PF_FW_ARQLEN;
-	cq->rq.bah = PF_FW_ARQBAH;
-	cq->rq.bal = PF_FW_ARQBAL;
-	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
-	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
-	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+	ICE_CQ_INIT_REGS(cq, PF_FW);
 }
 
 /**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
 {
 	struct ice_ctl_q_info *cq = &hw->mailboxq;
 
-	/* set head and tail registers in our local struct */
-	cq->sq.head = PF_MBX_ATQH;
-	cq->sq.tail = PF_MBX_ATQT;
-	cq->sq.len = PF_MBX_ATQLEN;
-	cq->sq.bah = PF_MBX_ATQBAH;
-	cq->sq.bal = PF_MBX_ATQBAL;
-	cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
-	cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
-	cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
-
-	cq->rq.head = PF_MBX_ARQH;
-	cq->rq.tail = PF_MBX_ARQT;
-	cq->rq.len = PF_MBX_ARQLEN;
-	cq->rq.bah = PF_MBX_ARQBAH;
-	cq->rq.bal = PF_MBX_ARQBAL;
-	cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
-	cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
-	cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+	ICE_CQ_INIT_REGS(cq, PF_MBX);
 }
 
 /**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 }
 
 /**
- * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * ice_free_cq_ring - Free control queue ring
  * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
+ * @ring: pointer to the specific control queue ring
  *
- * This assumes the posted send buffers have already been cleaned
+ * This assumes the posted buffers have already been cleaned
  * and de-allocated
  */
-static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
 {
-	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
-			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
-	cq->sq.desc_buf.va = NULL;
-	cq->sq.desc_buf.pa = 0;
-	cq->sq.desc_buf.size = 0;
-}
-
-/**
- * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- */
-static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
-	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
-			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
-	cq->rq.desc_buf.va = NULL;
-	cq->rq.desc_buf.pa = 0;
-	cq->rq.desc_buf.size = 0;
+	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
+			   ring->desc_buf.va, ring->desc_buf.pa);
+	ring->desc_buf.va = NULL;
+	ring->desc_buf.pa = 0;
+	ring->desc_buf.size = 0;
 }
 
 /**
@@ -280,54 +250,23 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	return ICE_ERR_NO_MEMORY;
 }
 
-/**
- * ice_free_rq_bufs - Free ARQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
 {
-	int i;
+	/* Clear Head and Tail */
+	wr32(hw, ring->head, 0);
+	wr32(hw, ring->tail, 0);
 
-	/* free descriptors */
-	for (i = 0; i < cq->num_rq_entries; i++) {
-		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
-				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
-		cq->rq.r.rq_bi[i].va = NULL;
-		cq->rq.r.rq_bi[i].pa = 0;
-		cq->rq.r.rq_bi[i].size = 0;
-	}
+	/* set starting point */
+	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
+	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
 
-	/* free the dma header */
-	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
-}
+	/* Check one register to verify that config was applied */
+	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
+		return ICE_ERR_AQ_ERROR;
 
-/**
- * ice_free_sq_bufs - Free ATQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
-	int i;
-
-	/* only unmap if the address is non-NULL */
-	for (i = 0; i < cq->num_sq_entries; i++)
-		if (cq->sq.r.sq_bi[i].pa) {
-			dmam_free_coherent(ice_hw_to_dev(hw),
-					   cq->sq.r.sq_bi[i].size,
-					   cq->sq.r.sq_bi[i].va,
-					   cq->sq.r.sq_bi[i].pa);
-			cq->sq.r.sq_bi[i].va = NULL;
-			cq->sq.r.sq_bi[i].pa = 0;
-			cq->sq.r.sq_bi[i].size = 0;
-		}
-
-	/* free the buffer info list */
-	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
-
-	/* free the dma header */
-	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+	return 0;
 }
 
 /**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 static enum ice_status
 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	u32 reg = 0;
-
-	/* Clear Head and Tail */
-	wr32(hw, cq->sq.head, 0);
-	wr32(hw, cq->sq.tail, 0);
-
-	/* set starting point */
-	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
-	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
-	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
-
-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, cq->sq.bal);
-	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
-		return ICE_ERR_AQ_ERROR;
-
-	return 0;
+	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
 }
 
 /**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 static enum ice_status
 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 {
-	u32 reg = 0;
+	enum ice_status status;
 
-	/* Clear Head and Tail */
-	wr32(hw, cq->rq.head, 0);
-	wr32(hw, cq->rq.tail, 0);
-
-	/* set starting point */
-	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
-	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
-	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+	if (status)
+		return status;
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
 
-	/* Check one register to verify that config was applied */
-	reg = rd32(hw, cq->rq.bal);
-	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
-		return ICE_ERR_AQ_ERROR;
-
 	return 0;
 }
 
@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	goto init_ctrlq_exit;
 
 init_ctrlq_free_rings:
-	ice_free_ctrlq_sq_ring(hw, cq);
+	ice_free_cq_ring(hw, &cq->sq);
 
 init_ctrlq_exit:
 	return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	goto init_ctrlq_exit;
 
 init_ctrlq_free_rings:
-	ice_free_ctrlq_rq_ring(hw, cq);
+	ice_free_cq_ring(hw, &cq->rq);
 
 init_ctrlq_exit:
 	return ret_code;
 }
 
+#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
+do {									\
+	int i;								\
+	/* free descriptors */						\
+	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
+		if ((qi)->ring.r.ring##_bi[i].pa) {			\
+			dmam_free_coherent(ice_hw_to_dev(hw),		\
+					   (qi)->ring.r.ring##_bi[i].size,\
+					   (qi)->ring.r.ring##_bi[i].va,\
+					   (qi)->ring.r.ring##_bi[i].pa);\
+			(qi)->ring.r.ring##_bi[i].va = NULL;		\
+			(qi)->ring.r.ring##_bi[i].pa = 0;		\
+			(qi)->ring.r.ring##_bi[i].size = 0;		\
+		}							\
+	/* free the buffer info list */					\
+	if ((qi)->ring.cmd_buf)						\
+		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
+	/* free dma head */						\
+	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
+} while (0)
+
 /**
  * ice_shutdown_sq - shutdown the Control ATQ
  * @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	cq->sq.count = 0;	/* to indicate uninitialized queue */
 
 	/* free ring buffers and the ring itself */
-	ice_free_sq_bufs(hw, cq);
-	ice_free_ctrlq_sq_ring(hw, cq);
+	ICE_FREE_CQ_BUFS(hw, cq, sq);
+	ice_free_cq_ring(hw, &cq->sq);
 
 shutdown_sq_out:
 	mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 	cq->rq.count = 0;
 
 	/* free ring buffers and the ring itself */
-	ice_free_rq_bufs(hw, cq);
-	ice_free_ctrlq_rq_ring(hw, cq);
+	ICE_FREE_CQ_BUFS(hw, cq, rq);
+	ice_free_cq_ring(hw, &cq->rq);
 
 shutdown_rq_out:
 	mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
  *     - cq->num_rq_entries
  *     - cq->rq_buf_size
  *     - cq->sq_buf_size
- *
  */
 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 {
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
  * @cd: pointer to command details structure
  *
- * This is the main send command routine for the ATQ.  It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
  * cleans the queue, etc.
  */
 enum ice_status
@@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
  * @pending: number of events that could be left to process
  *
  * This function cleans one Admin Receive Queue element and returns
- * the contents through e.  It can also return how many events are
+ * the contents through e. It can also return how many events are
  * left to process through 'pending'.
  */
 enum ice_status
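
ICE_CQ_INIT_REGS() and ICE_FREE_CQ_BUFS() above both rely on preprocessor token pasting: the prefix or ring-name argument is glued onto each register or field suffix, so one macro body serves PF_FW and PF_MBX (or sq and rq). A tiny standalone illustration of the same ## trick; the register values here are made up for the example.

#include <stdio.h>

#define PF_FW_ATQH	0x11111111	/* illustrative values only */
#define PF_MBX_ATQH	0x22222222

/* prefix##_ATQH becomes PF_FW_ATQH or PF_MBX_ATQH depending on the
 * argument, just as in ICE_CQ_INIT_REGS(); #prefix stringifies the
 * argument for the message.
 */
#define SHOW_ATQH(prefix) \
	printf(#prefix "_ATQH = 0x%08x\n", prefix##_ATQH)

int main(void)
{
	SHOW_ATQH(PF_FW);	/* prints "PF_FW_ATQH = 0x11111111" */
	SHOW_ATQH(PF_MBX);
	return 0;
}
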
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 648acdb..3b6e387 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
  * The PF_STATs are appended to the netdev stats only when ethtool -S
  * is queried on the base PF netdev.
  */
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
 	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
 	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
 };
 
-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
 	PFGEN_STATE,
 	PRTGEN_STATUS,
 	QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 		 * a private ethtool flag). This is due to the nature of the
 		 * ethtool stats API.
 		 *
-		 * User space programs such as ethtool must make 3 separate
+		 * Userspace programs such as ethtool must make 3 separate
 		 * ioctl requests, one for size, one for the strings, and
 		 * finally one for the stats. Since these cross into
-		 * user space, changes to the number or size could result in
+		 * userspace, changes to the number or size could result in
 		 * undefined memory access or incorrect string<->value
 		 * correlations for statistics.
 		 *
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
 {
 	/* restart autonegotiation */
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_link_status *hw_link_info;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_port_info *pi;
 	enum ice_status status;
-	bool link_up;
 
 	pi = vsi->port_info;
-	hw_link_info = &pi->phy.link_info;
-	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+	/* If VSI state is up, then restart autoneg with link up */
+	if (!test_bit(__ICE_DOWN, vsi->back->state))
+		status = ice_aq_set_link_restart_an(pi, true, NULL);
+	else
+		status = ice_aq_set_link_restart_an(pi, false, NULL);
 
-	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
 	if (status) {
 		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
 			    status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 /**
  * ice_set_pauseparam - Set Flow Control parameter
  * @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
  */
 static int
 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
 }
 
 /**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
  * @netdev: network interface device structure
  *
  * Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
 }
 
 /**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
@@ -1603,7 +1603,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
 }
 
 /**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb..5507928 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
 #define _ICE_HW_AUTOGEN_H_
 
 #define QTX_COMM_DBELL(_DBQM)			(0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM)			(0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S			0
+#define QTX_COMM_HEAD_HEAD_M			ICE_M(0x1FFF, 0)
 #define PF_FW_ARQBAH				0x00080180
 #define PF_FW_ARQBAL				0x00080080
 #define PF_FW_ARQH				0x00080380
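The QTX_COMM_HEAD definitions added above are consumed later in this merge by ice_tx_timeout() to report the hardware head of a hung Tx queue. Below is a minimal standalone sketch of the implied mask-and-shift extraction; it assumes ICE_M(m, s) expands to ((m) << (s)) as elsewhere in the driver, and the raw register value is purely hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver macros; the mask is pre-shifted, mirroring
 * ICE_M(0x1FFF, 0).
 */
#define QTX_COMM_HEAD_HEAD_S	0
#define QTX_COMM_HEAD_HEAD_M	(0x1FFFu << QTX_COMM_HEAD_HEAD_S)

/* Extract the 13-bit head index from a raw QTX_COMM_HEAD read. */
static uint32_t qtx_comm_head(uint32_t reg)
{
	return (reg & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
}

int main(void)
{
	uint32_t raw = 0xDEAD1234;	/* hypothetical register value */

	/* prints 0x1234: only the low 13 bits carry the head index */
	printf("head index: 0x%x\n", (unsigned int)qtx_comm_head(raw));
	return 0;
}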
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 7d2a667..bb51dd7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -6,11 +6,11 @@
 
 union ice_32byte_rx_desc {
 	struct {
-		__le64  pkt_addr; /* Packet buffer address */
-		__le64  hdr_addr; /* Header buffer address */
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
 			/* bit 0 of hdr_addr is DD bit */
-		__le64  rsvd1;
-		__le64  rsvd2;
+		__le64 rsvd1;
+		__le64 rsvd2;
 	} read;
 	struct {
 		struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
  */
 union ice_32b_rx_flex_desc {
 	struct {
-		__le64  pkt_addr; /* Packet buffer address */
-		__le64  hdr_addr; /* Header buffer address */
-				  /* bit 0 of hdr_addr is DD bit */
-		__le64  rsvd1;
-		__le64  rsvd2;
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+				 /* bit 0 of hdr_addr is DD bit */
+		__le64 rsvd1;
+		__le64 rsvd2;
 	} read;
 	struct {
 		/* Qword 0 */
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
 
 #define ICE_RXQ_CTX_SIZE_DWORDS		8
 #define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS	5
+#define GLTCLAN_CQ_CNTX(i, CQ)		(GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
 
 /* RLAN Rx queue context data
  *
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
 	u16 dbuf; /* bigger than needed, see above for reason */
 #define ICE_RLAN_CTX_HBUF_S 6
 	u16 hbuf; /* bigger than needed, see above for reason */
-	u8  dtype;
-	u8  dsize;
-	u8  crcstrip;
-	u8  l2tsel;
-	u8  hsplit_0;
-	u8  hsplit_1;
-	u8  showiv;
+	u8 dtype;
+	u8 dsize;
+	u8 crcstrip;
+	u8 l2tsel;
+	u8 hsplit_0;
+	u8 hsplit_1;
+	u8 showiv;
 	u32 rxmax; /* bigger than needed, see above for reason */
-	u8  tphrdesc_ena;
-	u8  tphwdesc_ena;
-	u8  tphdata_ena;
-	u8  tphhead_ena;
+	u8 tphrdesc_ena;
+	u8 tphwdesc_ena;
+	u8 tphdata_ena;
+	u8 tphhead_ena;
 	u16 lrxqthresh; /* bigger than needed, see above for reason */
 };
 
@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
 struct ice_tlan_ctx {
 #define ICE_TLAN_CTX_BASE_S	7
 	u64 base;		/* base is defined in 128-byte units */
-	u8  port_num;
+	u8 port_num;
 	u16 cgd_num;		/* bigger than needed, see above for reason */
-	u8  pf_num;
+	u8 pf_num;
 	u16 vmvf_num;
-	u8  vmvf_type;
+	u8 vmvf_type;
 #define ICE_TLAN_CTX_VMVF_TYPE_VF	0
 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
 #define ICE_TLAN_CTX_VMVF_TYPE_PF	2
 	u16 src_vsi;
-	u8  tsyn_ena;
-	u8  alt_vlan;
+	u8 tsyn_ena;
+	u8 alt_vlan;
 	u16 cpuid;		/* bigger than needed, see above for reason */
-	u8  wb_mode;
-	u8  tphrd_desc;
-	u8  tphrd;
-	u8  tphwr_desc;
+	u8 wb_mode;
+	u8 tphrd_desc;
+	u8 tphrd;
+	u8 tphwr_desc;
 	u16 cmpq_id;
 	u16 qnum_in_func;
-	u8  itr_notification_mode;
-	u8  adjust_prof_id;
+	u8 itr_notification_mode;
+	u8 adjust_prof_id;
 	u32 qlen;		/* bigger than needed, see above for reason */
-	u8  quanta_prof_idx;
-	u8  tso_ena;
+	u8 quanta_prof_idx;
+	u8 tso_ena;
 	u16 tso_qnum;
-	u8  legacy_int;
-	u8  drop_ena;
-	u8  cache_prof_idx;
-	u8  pkt_shaper_prof_idx;
-	u8  int_q_state;	/* width not needed - internal do not write */
+	u8 legacy_int;
+	u8 drop_ena;
+	u8 cache_prof_idx;
+	u8 pkt_shaper_prof_idx;
+	u8 int_q_state;	/* width not needed - internal do not write */
 };
 
 /* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2..29b1dcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	u16 pf_q;
 	int err;
 
-	/* what is RX queue number in global space of 2K Rx queues */
+	/* what is Rx queue number in global space of 2K Rx queues */
 	pf_q = vsi->rxq_map[ring->q_index];
 
 	/* clear the context structure first */
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
 {
 	int i;
 
-	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
 		u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
 
 		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
 			break;
 
-		usleep_range(10, 20);
+		usleep_range(20, 40);
 	}
-	if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+	if (i >= ICE_Q_WAIT_MAX_RETRY)
 		return -ETIMEDOUT;
 
 	return 0;
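For context, ice_pf_rxq_wait() is a bounded register poll: it re-reads the queue control register until the enable status matches the requested state or the retry budget runs out. The userspace model below shows the same pattern; the reader callback, status bit, and retry count are illustrative stand-ins rather than driver API.

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

#define Q_WAIT_MAX_RETRY	5		/* illustrative retry bound */
#define QENA_STAT_BIT		(1u << 2)	/* hypothetical "queue enabled" status bit */

/* Poll a queue status register until its enable state matches 'ena'.
 * read_reg() stands in for the MMIO read. Returns 0 on success or
 * -ETIMEDOUT when the retries are exhausted.
 */
static int rxq_wait(unsigned int (*read_reg)(void), bool ena)
{
	int i;

	for (i = 0; i < Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(read_reg() & QENA_STAT_BIT))
			return 0;
		usleep(30);	/* the driver sleeps usleep_range(20, 40) per try */
	}

	return -ETIMEDOUT;
}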
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  */
 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-	u16 offset = 0, qmap = 0, numq_tc;
-	u16 pow = 0, max_rss = 0, qcount;
+	u16 offset = 0, qmap = 0, tx_count = 0;
 	u16 qcount_tx = vsi->alloc_txq;
 	u16 qcount_rx = vsi->alloc_rxq;
+	u16 tx_numq_tc, rx_numq_tc;
+	u16 pow = 0, max_rss = 0;
 	bool ena_tc0 = false;
+	u8 netdev_tc = 0;
 	int i;
 
 	/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		vsi->tc_cfg.ena_tc |= 1;
 	}
 
-	numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	if (!rx_numq_tc)
+		rx_numq_tc = 1;
+	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+	if (!tx_numq_tc)
+		tx_numq_tc = 1;
 
 	/* TC mapping is a function of the number of Rx queues assigned to the
 	 * VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * Setup number and offset of Rx queues for all TCs for the VSI
 	 */
 
-	qcount = numq_tc;
+	qcount_rx = rx_numq_tc;
+
 	/* qcount will change if RSS is enabled */
 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
 		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 				max_rss = ICE_MAX_LG_RSS_QS;
 			else
 				max_rss = ICE_MAX_SMALL_RSS_QS;
-			qcount = min_t(int, numq_tc, max_rss);
-			qcount = min_t(int, qcount, vsi->rss_size);
+			qcount_rx = min_t(int, rx_numq_tc, max_rss);
+			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
 		}
 	}
 
 	/* find the (rounded up) power-of-2 of qcount */
-	pow = order_base_2(qcount);
+	pow = order_base_2(qcount_rx);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
 			/* TC is not enabled */
 			vsi->tc_cfg.tc_info[i].qoffset = 0;
-			vsi->tc_cfg.tc_info[i].qcount = 1;
+			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
 			ctxt->info.tc_mapping[i] = 0;
 			continue;
 		}
 
 		/* TC is enabled */
 		vsi->tc_cfg.tc_info[i].qoffset = offset;
-		vsi->tc_cfg.tc_info[i].qcount = qcount;
+		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
 
 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
 			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
 			 ICE_AQ_VSI_TC_Q_NUM_M);
-		offset += qcount;
+		offset += qcount_rx;
+		tx_count += tx_numq_tc;
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-
-	vsi->num_txq = qcount_tx;
 	vsi->num_rxq = offset;
+	vsi->num_txq = tx_count;
 
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
 		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
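The reworked ice_vsi_setup_q_map() packs, for each enabled TC, an Rx queue offset and the log2 of a rounded-up queue count into the VSI context tc_mapping word. The sketch below models that encoding in plain C; the field widths and shifts mirror the ICE_AQ_VSI_TC_Q_* convention but should be read as assumptions, not an authoritative register layout.

#include <stdint.h>

/* Assumed layout: low bits hold the queue offset, the next field holds
 * log2(queue count).
 */
#define TC_Q_OFFSET_S	0
#define TC_Q_OFFSET_M	(0x7FF << TC_Q_OFFSET_S)
#define TC_Q_NUM_S	11
#define TC_Q_NUM_M	(0xF << TC_Q_NUM_S)

/* Round qcount up to a power of two and return the exponent, like
 * order_base_2() does for the driver.
 */
static uint16_t log2_roundup(uint16_t qcount)
{
	uint16_t pow = 0;

	while ((1u << pow) < qcount)
		pow++;

	return pow;
}

/* Build the per-TC mapping word from an Rx queue offset and count. */
static uint16_t build_tc_qmap(uint16_t offset, uint16_t qcount_rx)
{
	uint16_t pow = log2_roundup(qcount_rx);

	return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
	       ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
}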
@@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the VSI struct
  *
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
  */
 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 {
@@ -1039,7 +1051,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
  *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
  * return -ENOMEM.
  */
 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 	struct ice_pf *pf = vsi->back;
 	int i;
 
-	/* Allocate tx_rings */
+	/* Allocate Tx rings */
 	for (i = 0; i < vsi->alloc_txq; i++) {
 		struct ice_ring *ring;
 
@@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		vsi->tx_rings[i] = ring;
 	}
 
-	/* Allocate rx_rings */
+	/* Allocate Rx rings */
 	for (i = 0; i < vsi->alloc_rxq; i++) {
 		struct ice_ring *ring;
 
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	struct ice_aqc_add_tx_qgrp *qg_buf;
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
+	u8 num_q_grps, q_idx = 0;
 	enum ice_status status;
 	u16 buf_len, i, pf_q;
 	int err = 0, tc = 0;
-	u8 num_q_grps;
 
 	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
 	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
 	if (!qg_buf)
 		return -ENOMEM;
 
-	if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
-		err = -EINVAL;
-		goto err_cfg_txqs;
-	}
 	qg_buf->num_txqs = 1;
 	num_q_grps = 1;
 
-	/* set up and configure the Tx queues */
-	ice_for_each_txq(vsi, i) {
-		struct ice_tlan_ctx tlan_ctx = { 0 };
+	/* set up and configure the Tx queues for each enabled TC */
+	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+			break;
 
-		pf_q = vsi->txq_map[i];
-		ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-		/* copy context contents into the qg_buf */
-		qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-		ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-			    ice_tlan_ctx_info);
+		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+			struct ice_tlan_ctx tlan_ctx = { 0 };
 
-		/* init queue specific tail reg. It is referred as transmit
-		 * comm scheduler queue doorbell.
-		 */
-		vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-					 num_q_grps, qg_buf, buf_len, NULL);
-		if (status) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to set LAN Tx queue context, error: %d\n",
-				status);
-			err = -ENODEV;
-			goto err_cfg_txqs;
+			pf_q = vsi->txq_map[q_idx];
+			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+					 pf_q);
+			/* copy context contents into the qg_buf */
+			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+				    ice_tlan_ctx_info);
+
+			/* init queue specific tail reg. It is referred to as
+			 * transmit comm scheduler queue doorbell.
+			 */
+			vsi->tx_rings[q_idx]->tail =
+				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+						 num_q_grps, qg_buf, buf_len,
+						 NULL);
+			if (status) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed to set LAN Tx queue context, error: %d\n",
+					status);
+				err = -ENODEV;
+				goto err_cfg_txqs;
+			}
+
+			/* Add Tx Queue TEID into the VSI Tx ring from the
+			 * response. This will complete configuring and
+			 * enabling the queue.
+			 */
+			txq = &qg_buf->txqs[0];
+			if (pf_q == le16_to_cpu(txq->txq_id))
+				vsi->tx_rings[q_idx]->txq_teid =
+					le32_to_cpu(txq->q_teid);
+
+			q_idx++;
 		}
-
-		/* Add Tx Queue TEID into the VSI Tx ring from the response
-		 * This will complete configuring and enabling the queue.
-		 */
-		txq = &qg_buf->txqs[0];
-		if (pf_q == le16_to_cpu(txq->txq_id))
-			vsi->tx_rings[i]->txq_teid =
-				le32_to_cpu(txq->q_teid);
 	}
 err_cfg_txqs:
 	devm_kfree(&pf->pdev->dev, qg_buf);
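The restructured ice_vsi_cfg_txqs() above walks traffic classes in order, stops at the first disabled TC, and advances a single flat index into txq_map across the per-TC Tx queue counts. A compact standalone model of that iteration order follows; the inputs are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_TC	8

/* Visit queues grouped by enabled TC, producing a flat queue index in the
 * same order the driver programs them. ena_tc is a bitmap of enabled TCs,
 * qcount_tx[] the per-TC Tx queue count.
 */
static void walk_tx_queues(uint8_t ena_tc, const uint16_t *qcount_tx)
{
	uint16_t q_idx = 0;
	int tc, i;

	for (tc = 0; tc < MAX_TC; tc++) {
		if (!(ena_tc & (1u << tc)))
			break;	/* the driver also stops at the first disabled TC */

		for (i = 0; i < qcount_tx[tc]; i++)
			printf("TC %d -> flat queue index %u\n", tc, q_idx++);
	}
}

int main(void)
{
	uint16_t qcount_tx[MAX_TC] = { 4, 4 };

	walk_tx_queues(0x3, qcount_tx);	/* TC0 and TC1 enabled, 4 queues each */
	return 0;
}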
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	ice_for_each_txq(vsi, i) {
 		u16 v_idx;
 
-		if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+		if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+		    !vsi->tx_rings[i]->q_vector) {
 			err = -EINVAL;
 			goto err_out;
 		}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	/* set RSS capabilities */
 	ice_vsi_set_rss_params(vsi);
 
+	/* set tc configuration */
+	ice_vsi_set_tc_cfg(vsi);
+
 	/* create the VSI */
 	ret = ice_vsi_init(vsi);
 	if (ret)
@@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		pf->q_left_rx -= vsi->alloc_rxq;
 		break;
 	default:
-		/* if VSI type is not recognized, clean up the resources and
-		 * exit
-		 */
+		/* clean up the resources and exit */
 		goto unroll_vsi_init;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);
@@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
 	int start = res->search_hint;
 	int end = start;
 
-	if ((start + needed) >  res->num_entries)
+	if ((start + needed) > res->num_entries)
 		return -ENOMEM;
 
 	id |= ICE_RES_VALID_BIT;
@@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	}
 
 	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	ice_vsi_delete(vsi);
 	ice_vsi_free_q_vectors(vsi);
 	ice_vsi_clear_rings(vsi);
@@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf;
 	int ret, i;
 
 	if (!vsi)
 		return -EINVAL;
 
+	pf = vsi->back;
+	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	ice_vsi_free_q_vectors(vsi);
 	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
 	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	ice_vsi_free_arrays(vsi, false);
 	ice_dev_onetime_setup(&vsi->back->hw);
 	ice_vsi_set_num_qs(vsi);
+	ice_vsi_set_tc_cfg(vsi);
 
 	/* Initialize VSI struct elements and create VSI in FW */
 	ret = ice_vsi_init(vsi);
@@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		break;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a..e45e574 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
 	/* disable the VSIs and their queues that are not already DOWN */
 	ice_pf_dis_all_vsi(pf);
 
+	if (hw->port_info)
+		ice_sched_clear_port(hw->port_info);
+
 	ice_shutdown_all_ctrlq(hw);
 
 	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
 	 * of reset is pending and sets bits in pf->state indicating the reset
-	 * type and __ICE_RESET_OICR_RECV.  So, if the latter bit is set
+	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
 	 * prepare for pending reset if not already (for PF software-initiated
 	 * global resets the software should already be prepared for it as
 	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
  * @pf: board private structure
  *
  * This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
  * when in MSI or Legacy interrupt mode.
  */
 static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
 
 	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
 
-	/* only 1 rx queue unless RSS is enabled */
+	/* only 1 Rx queue unless RSS is enabled */
 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		pf->num_lan_rx = 1;
 	else
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
 
 	ice_determine_q_usage(pf);
 
-	pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
-				  hw->func_caps.guaranteed_num_vsi);
+	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
 	if (!pf->num_alloc_vsi) {
 		err = -EIO;
 		goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 		if (err)
 			return err;
 	}
-
 	err = ice_vsi_cfg_txqs(vsi);
 	if (!err)
 		err = ice_vsi_cfg_rxqs(vsi);
@@ -3138,8 +3139,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
 /**
  * ice_dis_vsi - pause a VSI
  * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
  */
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 {
 	if (test_bit(__ICE_DOWN, vsi->state))
 		return;
@@ -3148,9 +3150,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
 
 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
 		if (netif_running(vsi->netdev)) {
-			rtnl_lock();
-			vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-			rtnl_unlock();
+			if (!locked) {
+				rtnl_lock();
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+				rtnl_unlock();
+			} else {
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+			}
 		} else {
 			ice_vsi_close(vsi);
 		}
@@ -3189,7 +3195,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
 
 	ice_for_each_vsi(pf, v)
 		if (pf->vsi[v])
-			ice_dis_vsi(pf->vsi[v]);
+			ice_dis_vsi(pf->vsi[v], false);
 }
 
 /**
@@ -3668,7 +3674,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
 		 */
 		status = ice_update_sw_rule_bridge_mode(hw);
 		if (status) {
-			netdev_err(dev, "update SW_RULE for bridge mode failed,  = %d err %d aq_err %d\n",
+			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
 				   mode, status, hw->adminq.sq_last_status);
 			/* revert hw->evb_veb */
 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3691,40 +3697,36 @@ static void ice_tx_timeout(struct net_device *netdev)
 	struct ice_ring *tx_ring = NULL;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
-	u32 head, val = 0, i;
 	int hung_queue = -1;
+	u32 i;
 
 	pf->tx_timeout_count++;
 
-	/* find the stopped queue the same way the stack does */
+	/* find the stopped queue the same way dev_watchdog() does */
 	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
 		unsigned long trans_start;
+		struct netdev_queue *q;
 
 		q = netdev_get_tx_queue(netdev, i);
 		trans_start = q->trans_start;
 		if (netif_xmit_stopped(q) &&
 		    time_after(jiffies,
-			       (trans_start + netdev->watchdog_timeo))) {
+			       trans_start + netdev->watchdog_timeo)) {
 			hung_queue = i;
 			break;
 		}
 	}
 
-	if (i == netdev->num_tx_queues) {
+	if (i == netdev->num_tx_queues)
 		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	} else {
+	else
 		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_txq; i++) {
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-				if (hung_queue ==
-				    vsi->tx_rings[i]->q_index) {
+		for (i = 0; i < vsi->num_txq; i++)
+			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+				if (hung_queue == vsi->tx_rings[i]->q_index) {
 					tx_ring = vsi->tx_rings[i];
 					break;
 				}
-			}
-		}
-	}
 
 	/* Reset recovery level if enough time has elapsed after last timeout.
 	 * Also ensure no new reset action happens before next timeout period.
@@ -3736,17 +3738,20 @@ static void ice_tx_timeout(struct net_device *netdev)
 		return;
 
 	if (tx_ring) {
-		head = tx_ring->next_to_clean;
+		struct ice_hw *hw = &pf->hw;
+		u32 head, val = 0;
+
+		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
 		/* Read interrupt register */
 		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-			val = rd32(&pf->hw,
+			val = rd32(hw,
 				   GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
-					tx_ring->vsi->hw_base_vector));
+						 tx_ring->vsi->hw_base_vector));
 
-		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
 			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
-			    head, tx_ring->next_to_use,
-			    readl(tx_ring->tail), val);
+			    head, tx_ring->next_to_use, val);
 	}
 
 	pf->tx_timeout_last_recovery = jiffies;
@@ -3780,7 +3785,7 @@ static void ice_tx_timeout(struct net_device *netdev)
  * @netdev: network interface device structure
  *
  * The open entry point is called when a network interface is made
- * active by the system (IFF_UP).  At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
  * for transmit and receive operations are allocated, the interrupt
  * handler is registered with the OS, the netdev watchdog is enabled,
  * and the stack is notified that the interface is ready.
@@ -3813,7 +3818,7 @@ static int ice_open(struct net_device *netdev)
  * @netdev: network interface device structure
  *
  * The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state.  The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
  * driver's control, but the netdev interface is disabled.
  *
  * Returns success only - not allowed to fail
@@ -3842,14 +3847,14 @@ ice_features_check(struct sk_buff *skb,
 	size_t len;
 
 	/* No point in doing any of this if neither checksum nor GSO are
-	 * being requested for this frame.  We can rule out both by just
+	 * being requested for this frame. We can rule out both by just
 	 * checking for CHECKSUM_PARTIAL
 	 */
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return features;
 
 	/* We cannot support GSO if the MSS is going to be less than
-	 * 64 bytes.  If it is then we need to drop support for GSO.
+	 * 64 bytes. If it is then we need to drop support for GSO.
 	 */
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
 		features &= ~NETIF_F_GSO_MASK;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa1..a168185 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
  *
  * Cleanup scheduling elements from SW DB
  */
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
 {
 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 		return;
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  * This function removes the leaf node that was created by the FW
  * during initialization
  */
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
 {
 	struct ice_sched_node *node;
 
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
  * This function frees all the nodes except root and TC that were created by
  * the FW during initialization
  */
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
 {
 	struct ice_sched_node *node;
 
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
  * @num_nodes: pointer to num nodes array
  *
  * This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
  * layers
  */
 static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 }
 
 /**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
  * @pi: port information structure
  * @vsi_handle: software VSI handle
  * @tc_node: pointer to TC node
  * @num_nodes: pointer to num nodes array
  *
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
  * VSI, its parent and intermediate nodes in below layers
  */
 static enum ice_status
@@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 }
 
 /**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
  * @pi: port information structure
  * @vsi_handle: software VSI handle
  * @tc: TC number
@@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
 
 	return status;
 }
+
+/**
+ * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes a single aggregator VSI info entry from the
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+	struct ice_sched_agg_info *agg_info;
+	struct ice_sched_agg_info *atmp;
+
+	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+		struct ice_sched_agg_vsi_info *agg_vsi_info;
+		struct ice_sched_agg_vsi_info *vtmp;
+
+		list_for_each_entry_safe(agg_vsi_info, vtmp,
+					 &agg_info->agg_vsi_list, list_entry)
+			if (agg_vsi_info->vsi_handle == vsi_handle) {
+				list_del(&agg_vsi_info->list_entry);
+				devm_kfree(ice_hw_to_dev(pi->hw),
+					   agg_vsi_info);
+				return;
+			}
+	}
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+	enum ice_status status = ICE_ERR_PARAM;
+	struct ice_vsi_ctx *vsi_ctx;
+	u8 i, j = 0;
+
+	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+		return status;
+	mutex_lock(&pi->sched_lock);
+	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+	if (!vsi_ctx)
+		goto exit_sched_rm_vsi_cfg;
+
+	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+		struct ice_sched_node *vsi_node, *tc_node;
+
+		tc_node = ice_sched_get_tc_node(pi, i);
+		if (!tc_node)
+			continue;
+
+		vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+		if (!vsi_node)
+			continue;
+
+		while (j < vsi_node->num_children) {
+			if (vsi_node->children[j]->owner == owner) {
+				ice_free_sched_node(pi, vsi_node->children[j]);
+
+				/* reset the counter again since the num
+				 * children will be updated after node removal
+				 */
+				j = 0;
+			} else {
+				j++;
+			}
+		}
+		/* remove the VSI if it has no children */
+		if (!vsi_node->num_children) {
+			ice_free_sched_node(pi, vsi_node);
+			vsi_ctx->sched.vsi_node[i] = NULL;
+
+			/* clean up aggregator related VSI info, if any */
+			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+		}
+		if (owner == ICE_SCHED_NODE_OWNER_LAN)
+			vsi_ctx->sched.max_lanq[i] = 0;
+	}
+	status = 0;
+
+exit_sched_rm_vsi_cfg:
+	mutex_unlock(&pi->sched_lock);
+	return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
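The child-removal loop in ice_sched_rm_vsi_cfg() restarts its scan index after every removal because freeing a node compacts the parent's children array. The standalone model below captures just that loop; the node structure and helper are toy stand-ins for the driver's scheduler types.

#include <stdint.h>
#include <string.h>

#define MAX_CHILDREN	8

struct toy_node {
	int owner[MAX_CHILDREN];	/* per-child owner tag (LAN, RDMA, ...) */
	uint8_t num_children;
};

/* Remove child j; the remaining children shift down, the way
 * ice_free_sched_node() leaves num_children updated.
 */
static void free_child(struct toy_node *n, uint8_t j)
{
	memmove(&n->owner[j], &n->owner[j + 1],
		(size_t)(n->num_children - j - 1) * sizeof(n->owner[0]));
	n->num_children--;
}

/* Remove every child with the given owner. Because removal compacts the
 * array, the scan restarts from 0 after each hit, mirroring the
 * "reset the counter" comment in the driver.
 */
static void rm_children_by_owner(struct toy_node *n, int owner)
{
	uint8_t j = 0;

	while (j < n->num_children) {
		if (n->owner[j] == owner) {
			free_child(n, j);
			j = 0;
		} else {
			j++;
		}
	}
}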
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa..da5b4c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@
 struct ice_sched_agg_vsi_info {
 	struct list_head list_entry;
 	DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+	u16 vsi_handle;
 };
 
 struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
 /* FW AQ command calls */
 enum ice_status ice_sched_init_port(struct ice_port_info *pi);
 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
 void ice_sched_cleanup_all(struct ice_hw *hw);
 struct ice_sched_node *
 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 enum ice_status
 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
 		  u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
 #endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 027eba4..533b989 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
  * @link_speed: variable containing the link_speed to be converted
  *
  * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps.  Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
  * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
  * needs to cast back to an enum virtchnl_link_speed in the case where
  * adv_link_support is false, but when adv_link_support is true the caller can
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 40c9c65..2e56931 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  * Allocate memory for the entire recipe table and initialize the structures/
  * entries corresponding to basic recipes.
  */
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
 {
 	struct ice_sw_recipe *recps;
 	u8 i;
@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
  *
  * NOTE: *req_desc is both an input/output parameter.
  * The caller of this function first calls this function with *request_desc set
- * to 0.  If the response from f/w has *req_desc set to 0, all the switch
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
  * configuration information has been returned; if non-zero (meaning not all
  * the information was returned), the caller should call this function again
  * with *req_desc set to the previous value returned by f/w to get the
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 /**
  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  * @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
  *
  * This helper function populates the lb_en and lan_en elements of the provided
  * ice_fltr_info struct using the switch's type and characteristics of the
  * switch rule being configured.
  */
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 {
-	f_info->lb_en = false;
-	f_info->lan_en = false;
-	if ((f_info->flag & ICE_FLTR_TX) &&
-	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
-	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
-	     f_info->fltr_act == ICE_FWD_TO_Q ||
-	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
-		f_info->lb_en = true;
-		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
-		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
-			f_info->lan_en = true;
+	fi->lb_en = false;
+	fi->lan_en = false;
+	if ((fi->flag & ICE_FLTR_TX) &&
+	    (fi->fltr_act == ICE_FWD_TO_VSI ||
+	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+	     fi->fltr_act == ICE_FWD_TO_Q ||
+	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
+		fi->lb_en = true;
+		/* Do not set lan_en to TRUE if
+		 * 1. The switch is a VEB AND
+		 * 2. Either of the following is true:
+		 * 2.1 The lookup is MAC with unicast addr for MAC, OR
+		 * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+		 *
+		 * In all other cases, the LAN enable has to be set to true.
+		 */
+		if (!(hw->evb_veb &&
+		      ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+			is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+		       (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+			is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+			fi->lan_en = true;
 	}
 }
 
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	/* Create two back-to-back switch rules and submit them to the HW using
 	 * one memory buffer:
 	 *    1. Large Action
-	 *    2. Look up tx rx
+	 *    2. Look up Tx Rx
 	 */
 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
-	/* call the fill switch rule to fill the lookup tx rx structure */
+	/* call the fill switch rule to fill the lookup Tx Rx structure */
 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
 			 ice_aqc_opc_update_sw_rules);
 
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
  * Call AQ command to add or update previously created VSI list with new VSI.
  *
  * Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the bookkeeping is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
  *	if only one VSI has been added till now
  *		Allocate a new VSI list and add two VSIs
  *		to this list using switch rule command
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
 		u16 vsi_handle = new_fltr->vsi_handle;
 		enum ice_adminq_opc opcode;
 
+		if (!m_entry->vsi_list_info)
+			return ICE_ERR_CFG;
+
 		/* A rule already exists with the new VSI being added */
 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
 			return 0;
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
 		/* Update the previous switch rule to a new VSI list which
-		 * includes current VSI thats requested
+		 * includes current VSI that is requested
 		 */
 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 		if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbab..49fc380 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
 
 /**
  * ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
 
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 	rx_ring->next_to_alloc = val;
 
 	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
+	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
 	 * such as IA-64).
 	 */
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 
 /**
  * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
  * @old_buf: donor buffer to have page reused
  *
  * Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
 
 /**
  * ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
  *
  * This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
  * ice_pull_tail - ice specific version of skb_pull_tail
  * @skb: pointer to current skb being adjusted
  *
- * This function is an ice specific version of __pskb_pull_tail.  The
+ * This function is an ice specific version of __pskb_pull_tail. The
  * main difference between this version and the original function is that
  * this function can make several assumptions about the state of things
  * that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
  * @rx_desc: Rx descriptor for current buffer
  * @skb: Current socket buffer containing buffer in progress
  *
- * This function updates next to clean.  If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
  * this function exits returning false, otherwise it will place the
  * sk_buff in the next buffer to be chained and return true indicating
  * that this is in fact a non-EOP buffer.
@@ -904,7 +904,7 @@ static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
 
 /**
  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  * @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
 
 /**
  * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
  * @skb: packet to send up
  * @vlan_tag: vlan tag for packet
  *
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
 
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
  *
  * This function provides a "bounce buffer" approach to Rx interrupt
- * processing.  The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
  * expensive overhead for IOMMU access this provides a means of avoiding
  * it by maintaining the mapping of the page to the system.
  *
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
-	return 0;
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+			ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+	return min(work_done, budget - 1);
 }
 
 /* helper function for building cmd/type/offset */
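The ice_napi_poll() change above (and the matching igb/igbvf changes later in this merge) follow the busy-poll-aware NAPI completion pattern: only re-arm the interrupt when napi_complete_done() reports that polling really ended, and never return the full budget once the ring is clean. Below is a condensed model of that return-value logic, with stand-in callbacks for the NAPI and interrupt-enable calls.

#include <stdbool.h>

/* Simplified model of the budget accounting used by the reworked poll
 * routines. complete_done() and enable_irq() stand in for
 * napi_complete_done() and the device-specific interrupt re-enable.
 */
static int poll_model(int work_done, int budget, bool clean_complete,
		      bool (*complete_done)(int), void (*enable_irq)(void))
{
	/* work still pending: stay in polling mode and report the full budget */
	if (!clean_complete)
		return budget;

	/* Re-arm the interrupt only if NAPI really left polling mode;
	 * with busy-polling the instance may stay scheduled, in which case
	 * complete_done() returns false.
	 */
	if (complete_done(work_done))
		enable_irq();

	/* never return the full budget once clean, so the core does not
	 * treat this poll as "still busy"
	 */
	return work_done < budget - 1 ? work_done : budget - 1;
}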
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
 }
 
 /**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
  *
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 }
 
 /**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size:    the size buffer we want to assure is available
  *
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 {
 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
+
 	return __ice_maybe_stop_tx(tx_ring, size);
 }
 
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  * Finally, we add one to round up. Because 256 isn't an exact multiple of
  * 3, we'll underestimate near each multiple of 12K. This is actually more
  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment.  For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
 }
 
 /**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
  * @skb: send buffer
  *
  * Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 	nr_frags -= ICE_MAX_BUF_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
-	/* Initialize size to the negative value of gso_size minus 1.  We
+	/* Initialize size to the negative value of gso_size minus 1. We
 	 * use this as the worst case scenario in which the frag ahead
 	 * of us only provides one byte which is why we are limited to 6
 	 * descriptors for a single transmit as the header and previous
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81..0ea4281 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
 
 /* Common HW capabilities for SW use */
 struct ice_hw_common_caps {
+	u32 valid_functions;
+
 	/* TX/RX queues */
 	u16 num_rxq;		/* Number/Total RX queues */
 	u16 rxq_first_id;	/* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
 	struct ice_hw_common_caps common_cap;
 	u32 num_allocd_vfs;		/* Number of allocated VFs */
 	u32 vf_base_id;			/* Logical ID of the first VF */
-	u32 guaranteed_num_vsi;
+	u32 guar_num_vsi;
 };
 
 /* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f..05ff4f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 }
 
-/***********************enable_vf routines*****************************/
-
 /**
  * ice_dis_vf_mappings
  * @vf: pointer to the VF structure
@@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf)
 	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
 		usleep_range(1000, 2000);
 
+	/* Disable IOV before freeing resources. This lets any VF drivers
+	 * running in the host get themselves cleaned up before we yank
+	 * the carpet out from underneath their feet.
+	 */
+	if (!pci_vfs_assigned(pf->pdev))
+		pci_disable_sriov(pf->pdev);
+	else
+		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
 	/* Avoid wait time by stopping all VFs at the same time */
 	for (i = 0; i < pf->num_alloc_vfs; i++) {
 		if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf)
 		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
 	}
 
-	/* Disable IOV before freeing resources. This lets any VF drivers
-	 * running in the host get themselves cleaned up before we yank
-	 * the carpet out from underneath their feet.
-	 */
-	if (!pci_vfs_assigned(pf->pdev))
-		pci_disable_sriov(pf->pdev);
-	else
-		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
 	tmp = pf->num_alloc_vfs;
 	pf->num_vf_qps = 0;
 	pf->num_alloc_vfs = 0;
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
 
 	/* Clear this bit after VF initialization since we shouldn't reclaim
 	 * and reassign interrupts for synchronous or asynchronous VFR events.
-	 * We don't want to reconfigure interrupts since AVF driver doesn't
+	 * We dont want to reconfigure interrupts since AVF driver doesn't
 	 * expect vector assignment to be changed unless there is a request for
 	 * more vectors.
 	 */
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
  * ice_process_vflr_event - Free VF resources via IRQ calls
  * @pf: pointer to the PF structure
  *
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
  * free up VF resources and state variables
  */
 void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		/* copy Tx queue info from VF into VSI */
 		vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
 		vsi->tx_rings[i]->count = qpi->txq.ring_len;
-		/* copy Rx queue info from VF into vsi */
+		/* copy Rx queue info from VF into VSI */
 		vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
 		vsi->rx_rings[i]->count = qpi->rxq.ring_len;
 		if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
  * @msg: pointer to the msg buffer
  * @set: true if mac filters are being set, false otherwise
  *
- * add guest mac address filter
+ * add guest MAC address filter
  */
 static int
 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
  * @msg: pointer to the msg buffer
  *
  * VFs get a default number of queues but can use this message to request a
- * different number.  If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
  * return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to VF.
+ * available queue pairs via virtchnl message response to vf.
  */
 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 {
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
 	if (req_queues <= 0) {
 		dev_err(&pf->pdev->dev,
-			"VF %d tried to request %d queues.  Ignoring.\n",
+			"VF %d tried to request %d queues. Ignoring.\n",
 			vf->vf_id, req_queues);
 	} else if (req_queues > ICE_MAX_QS_PER_VF) {
 		dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 10131e0..01470a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,7 +70,7 @@ struct ice_vf {
 	u8 spoofchk;
 	u16 num_mac;
 	u16 num_vlan;
-	u8 num_req_qs;		/* num of queue pairs requested by VF */
+	u8 num_req_qs;			/* num of queue pairs requested by VF */
 };
 
 #ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 5acf3b7..c576710 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
 		return -EOPNOTSUPP;
 
 	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5df88ad..453ae1d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
 	 * configuration' in respect to these parameters.
 	 */
 
-	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
-			    idleslope %d sendslope %d hiCredit %d \
-			    locredit %d\n",
-		   (ring->cbs_enable) ? "enabled" : "disabled",
-		   (ring->launchtime_enable) ? "enabled" : "disabled", queue,
-		   ring->idleslope, ring->sendslope, ring->hicredit,
-		   ring->locredit);
+	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+		   ring->cbs_enable ? "enabled" : "disabled",
+		   ring->launchtime_enable ? "enabled" : "disabled",
+		   queue,
+		   ring->idleslope, ring->sendslope,
+		   ring->hicredit, ring->locredit);
 }
 
 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -6019,6 +6018,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
 	/* set the timestamp */
 	first->time_stamp = jiffies;
 
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
 	 * memory model archs, such as IA-64).
@@ -6147,8 +6148,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	else if (!tso)
 		igb_tx_csum(tx_ring, first);
 
-	skb_tx_timestamp(skb);
-
 	if (igb_tx_map(tx_ring, first, hdr_len))
 		goto cleanup_tx_tstamp;
 
@@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* If not enough Rx work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	igb_ring_irq_enable(q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		igb_ring_irq_enable(q_vector);
 
-	return 0;
+	return min(work_done, budget - 1);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2b95dc9..fd3071f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -277,17 +277,25 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }
 
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
-				 struct timespec64 *ts)
+static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp,
+				  struct timespec64 *ts,
+				  struct ptp_system_timestamp *sts)
 {
 	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
 					       ptp_caps);
+	struct e1000_hw *hw = &igb->hw;
 	unsigned long flags;
+	u32 lo, hi;
 	u64 ns;
 
 	spin_lock_irqsave(&igb->tmreg_lock, flags);
 
-	ns = timecounter_read(&igb->tc);
+	ptp_read_system_prets(sts);
+	lo = rd32(E1000_SYSTIML);
+	ptp_read_system_postts(sts);
+	hi = rd32(E1000_SYSTIMH);
+
+	ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
 
 	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
 
@@ -296,16 +304,50 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
 	return 0;
 }
 
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
-				struct timespec64 *ts)
+static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp,
+				  struct timespec64 *ts,
+				  struct ptp_system_timestamp *sts)
 {
 	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
 					       ptp_caps);
+	struct e1000_hw *hw = &igb->hw;
+	unsigned long flags;
+	u32 lo, hi;
+	u64 ns;
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	ptp_read_system_prets(sts);
+	rd32(E1000_SYSTIMR);
+	ptp_read_system_postts(sts);
+	lo = rd32(E1000_SYSTIML);
+	hi = rd32(E1000_SYSTIMH);
+
+	ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp,
+				 struct timespec64 *ts,
+				 struct ptp_system_timestamp *sts)
+{
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+					       ptp_caps);
+	struct e1000_hw *hw = &igb->hw;
 	unsigned long flags;
 
 	spin_lock_irqsave(&igb->tmreg_lock, flags);
 
-	igb_ptp_read_i210(igb, ts);
+	ptp_read_system_prets(sts);
+	rd32(E1000_SYSTIMR);
+	ptp_read_system_postts(sts);
+	ts->tv_nsec = rd32(E1000_SYSTIML);
+	ts->tv_sec = rd32(E1000_SYSTIMH);
 
 	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
 
@@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
 	struct igb_adapter *igb =
 		container_of(work, struct igb_adapter, ptp_overflow_work.work);
 	struct timespec64 ts;
+	u64 ns;
 
-	igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
+	/* Update the timecounter */
+	ns = timecounter_read(&igb->tc);
 
+	ts = ns_to_timespec64(ns);
 	pr_debug("igb overflow check at %lld.%09lu\n",
 		 (long long) ts.tv_sec, ts.tv_nsec);
 
@@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.pps = 0;
 		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
-		adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+		adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
 		adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
 		adapter->ptp_caps.enable = igb_ptp_feature_enable;
 		adapter->cc.read = igb_ptp_read_82576;
@@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.pps = 0;
 		adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
-		adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+		adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
 		adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
 		adapter->ptp_caps.enable = igb_ptp_feature_enable;
 		adapter->cc.read = igb_ptp_read_82580;
@@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		adapter->ptp_caps.pin_config = adapter->sdp_config;
 		adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
-		adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+		adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210;
 		adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
 		adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
 		adapter->ptp_caps.verify = igb_ptp_verify_pin;
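The igb PTP conversion replaces gettime64 with gettimex64 so the system clock can be sampled immediately around the device timestamp read. The sketch below models that bracketing in userspace; note that the driver takes the post snapshot right after the latching low-word read, whereas this simplified version treats the whole device read as one callback, and the snapshot struct is only a stand-in for ptp_system_timestamp.

#include <stdint.h>
#include <time.h>

/* Stand-in for struct ptp_system_timestamp: system time captured just
 * before and just after the device timestamp read.
 */
struct sys_ts {
	struct timespec pre;
	struct timespec post;
};

/* Read a device counter bracketed by system-clock snapshots, the way
 * igb_ptp_gettimex_82576() wraps its SYSTIM reads with
 * ptp_read_system_prets()/ptp_read_system_postts().
 */
static uint64_t read_dev_time_bracketed(uint64_t (*read_dev_counter)(void),
					struct sys_ts *sts)
{
	uint64_t cycles;

	clock_gettime(CLOCK_REALTIME, &sts->pre);	/* prets */
	cycles = read_dev_counter();			/* SYSTIML then SYSTIMH */
	clock_gettime(CLOCK_REALTIME, &sts->post);	/* postts */

	return cycles;
}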
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e583..a3cd7ac 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
 	s32 err;
 	u16 i;
 
-	WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+	lockdep_assert_held(&hw->mbx_lock);
 
 	/* lock the mailbox to prevent pf/vf race condition */
 	err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
 	s32 err;
 	u16 i;
 
-	WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+	lockdep_assert_held(&hw->mbx_lock);
 
 	/* lock the mailbox to prevent pf/vf race condition */
 	err = e1000_obtain_mbx_lock_vf(hw);
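
Note: the WARN_ON_ONCE(!spin_is_locked(...)) checks above are replaced because spin_is_locked() cannot tell whether the *current* context holds the lock and is always false on !CONFIG_SMP builds where spinlocks compile away; lockdep_assert_held() checks real ownership and costs nothing when lockdep is disabled. A small illustration with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_stats {
	spinlock_t lock;
	u64 updates;
};

/* Caller must hold stats->lock; the assertion documents that and
 * enforces it whenever CONFIG_LOCKDEP is enabled.
 */
static void foo_bump_updates(struct foo_stats *stats)
{
	lockdep_assert_held(&stats->lock);
	stats->updates++;
}
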
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 820d49e..4eab83f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
 
 	igbvf_clean_rx_irq(adapter, &work_done, budget);
 
-	/* If not enough Rx work done, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete_done(napi, work_done);
+	if (work_done == budget)
+		return budget;
 
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done))) {
 		if (adapter->requested_itr & 3)
 			igbvf_set_itr(adapter);
 
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index cdf18a5..b1039dd 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -5,23 +5,12 @@
 #define _IGC_H_
 
 #include <linux/kobject.h>
-
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
-
 #include <linux/ethtool.h>
-
 #include <linux/sctp.h>
 
-#define IGC_ERR(args...) pr_err("igc: " args)
-
-#define PFX "igc: "
-
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-
 #include "igc_hw.h"
 
 /* main */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 832da609..df40af7 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
 {
 	struct igc_phy_info *phy = &hw->phy;
 	s32 ret_val = 0;
-	u32 ctrl_ext;
 
 	if (hw->phy.media_type != igc_media_type_copper) {
 		phy->type = igc_phy_none;
@@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
 	phy->reset_delay_us	= 100;
 
-	ctrl_ext = rd32(IGC_CTRL_EXT);
-
 	/* set lan id */
 	hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
 			IGC_STATUS_FUNC_SHIFT;
@@ -287,8 +284,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
 static s32 igc_get_invariants_base(struct igc_hw *hw)
 {
 	struct igc_mac_info *mac = &hw->mac;
-	u32 link_mode = 0;
-	u32 ctrl_ext = 0;
 	s32 ret_val = 0;
 
 	switch (hw->device_id) {
@@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
 
 	hw->phy.media_type = igc_media_type_copper;
 
-	ctrl_ext = rd32(IGC_CTRL_EXT);
-	link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
-
 	/* mac initialization and operations */
 	ret_val = igc_init_mac_params_base(hw);
 	if (ret_val)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9d85707..f201830 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	/* set the timestamp */
 	first->time_stamp = jiffies;
 
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
 	 * memory model archs, such as IA-64).
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	first->bytecount = skb->len;
 	first->gso_segs = 1;
 
-	skb_tx_timestamp(skb);
-
 	/* record initial flags and protocol */
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
@@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
 
 	/* update pointers within the skb to store the data */
 	skb_reserve(skb, IGC_SKB_PAD);
-	 __skb_put(skb, size);
+	__skb_put(skb, size);
 
 	/* update buffer offset */
 #if (PAGE_SIZE < 8192)
@@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 				(va + headlen) - page_address(rx_buffer->page),
 				size, truesize);
 #if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
+		rx_buffer->page_offset ^= truesize;
 #else
-	rx_buffer->page_offset += truesize;
+		rx_buffer->page_offset += truesize;
 #endif
 	} else {
 		rx_buffer->pagecnt_bias++;
@@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 				tx_buffer->next_to_watch,
 				jiffies,
 				tx_buffer->next_to_watch->wb.status);
-				netif_stop_subqueue(tx_ring->netdev,
-						    tx_ring->queue_index);
+			netif_stop_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
 
 			/* we are about to reset, no point in enabling stuff */
 			return true;
@@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 }
 
 /**
- * igc_ioctl - I/O control method
- * @netdev: network interface device structure
- * @ifreq: frequency
- * @cmd: command
- */
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
  * igc_up - Open the interface and prepare it to handle traffic
  * @adapter: board private structure
  */
@@ -2866,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* If not enough Rx work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	igc_ring_irq_enable(q_vector);
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		igc_ring_irq_enable(q_vector);
 
-	return 0;
+	return min(work_done, budget - 1);
 }
 
 /**
@@ -3358,7 +3346,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
 		goto err_req_irq;
 
 	/* Notify the stack of the actual queue counts. */
-	netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
 	if (err)
 		goto err_set_queues;
 
@@ -3445,7 +3433,6 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_set_mac_address	= igc_set_mac,
 	.ndo_change_mtu		= igc_change_mtu,
 	.ndo_get_stats		= igc_get_stats,
-	.ndo_do_ioctl		= igc_ioctl,
 };
 
 /* PCIe configuration access */
@@ -3532,26 +3519,23 @@ static int igc_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 	struct igc_hw *hw;
 	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
-	int err, pci_using_dac;
+	int err;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
 
-	pci_using_dac = 0;
 	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
 		err = dma_set_coherent_mask(&pdev->dev,
 					    DMA_BIT_MASK(64));
-		if (!err)
-			pci_using_dac = 1;
 	} else {
 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			err = dma_set_coherent_mask(&pdev->dev,
 						    DMA_BIT_MASK(32));
 			if (err) {
-				IGC_ERR("Wrong DMA configuration, aborting\n");
+				dev_err(&pdev->dev, "igc: Wrong DMA config\n");
 				goto err_dma;
 			}
 		}
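
Note: moving skb_tx_timestamp() from igc_xmit_frame_ring() into igc_tx_map() (and likewise in the ixgbe/ixgbevf hunks further down) takes the software transmit timestamp after all descriptor-setup failure paths, immediately before the hardware is told about the new descriptors. A rough sketch of the placement, with hypothetical foo_* helpers standing in for the driver's descriptor and doorbell code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_queue_descriptors(struct net_device *dev, struct sk_buff *skb)
{
	return 0;	/* stands in for DMA mapping + descriptor setup */
}

static void foo_ring_doorbell(struct net_device *dev)
{
	/* stands in for the tail/doorbell write */
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (foo_queue_descriptors(dev, skb))
		return NETDEV_TX_BUSY;

	/* Timestamp right before the doorbell so it reflects the real
	 * handoff to hardware rather than an skb that may still be dropped.
	 */
	skb_tx_timestamp(skb);

	foo_ring_doorbell(dev);
	return NETDEV_TX_OK;
}
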
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 732b1e6..acba067 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
+			    WAKE_FILTER))
 		return -EOPNOTSUPP;
 
 	if (ixgbe_wol_exclusion(adapter, wol))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index fd1b054..4d77f42 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
 #include "ixgbe.h"
 #include <net/xfrm.h>
 #include <crypto/aead.h>
+#include <linux/if_bridge.h>
 
 #define IXGBE_IPSEC_KEY_BITS  160
 static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 	} else {
 		struct tx_sa tsa;
 
-		if (adapter->num_vfs)
+		if (adapter->num_vfs &&
+		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
 			return -EOPNOTSUPP;
 
 		/* find the first unused index */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 113b38e..49a4ea3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6077,9 +6077,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	/* Disable Rx */
 	ixgbe_disable_rx(adapter);
 
-	/* synchronize_sched() needed for pending XDP buffers to drain */
+	/* synchronize_rcu() needed for pending XDP buffers to drain */
 	if (adapter->xdp_ring[0])
-		synchronize_sched();
+		synchronize_rcu();
 
 	ixgbe_irq_disable(adapter);
 
@@ -8269,6 +8269,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	/* set the timestamp */
 	first->time_stamp = jiffies;
 
+	skb_tx_timestamp(skb);
+
 	/*
 	 * Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
@@ -8646,8 +8648,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		}
 	}
 
-	skb_tx_timestamp(skb);
-
 #ifdef CONFIG_PCI_IOV
 	/*
 	 * Use the l2switch_enable flag - would be false if the DMA
@@ -10476,7 +10476,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
 	ixgbe_disable_rxr_hw(adapter, rx_ring);
 
 	if (xdp_ring)
-		synchronize_sched();
+		synchronize_rcu();
 
 	/* Rx/Tx/XDP Tx share the same napi context. */
 	napi_disable(&rx_ring->q_vector->napi);
@@ -10517,7 +10517,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
 	ixgbe_configure_rx_ring(adapter, rx_ring);
 
 	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
-	clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+	if (xdp_ring)
+		clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
 }
 
 /**
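
Note: the synchronize_sched() calls above become synchronize_rcu() following the RCU flavor consolidation; the call still waits for in-flight NAPI/XDP processing (which runs with BHs disabled) to drain before the rings are torn down, and the added xdp_ring NULL check avoids touching a ring that was never allocated. A condensed sketch of the quiesce step, assuming a hypothetical foo_adapter:

#include <linux/rcupdate.h>

struct foo_ring;

struct foo_adapter {
	struct foo_ring *xdp_ring;
};

static void foo_disable_rx(struct foo_adapter *adapter)
{
	/* stands in for stopping the RX queues in hardware */
}

static void foo_quiesce_xdp(struct foo_adapter *adapter)
{
	foo_disable_rx(adapter);

	/* Wait for any XDP buffers still being processed under
	 * rcu_read_lock()/BH-disabled context to complete.
	 */
	if (adapter->xdp_ring)
		synchronize_rcu();
}
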
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index b3e0d8b..d81a50d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 }
 
 /**
- * ixgbe_ptp_gettime
+ * ixgbe_ptp_gettimex
  * @ptp: the ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
  *
  * read the timecounter and return the correct value on ns,
  * after converting it into a struct timespec.
  */
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+			      struct timespec64 *ts,
+			      struct ptp_system_timestamp *sts)
 {
 	struct ixgbe_adapter *adapter =
 		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+	struct ixgbe_hw *hw = &adapter->hw;
 	unsigned long flags;
-	u64 ns;
+	u64 ns, stamp;
 
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
-	ns = timecounter_read(&adapter->hw_tc);
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		/* Upper 32 bits represent billions of cycles, lower 32 bits
+		 * represent cycles. However, we use timespec64_to_ns for the
+		 * correct math even though the units haven't been corrected
+		 * yet.
+		 */
+		ptp_read_system_prets(sts);
+		IXGBE_READ_REG(hw, IXGBE_SYSTIMR);
+		ptp_read_system_postts(sts);
+		ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+		ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH);
+		stamp = timespec64_to_ns(ts);
+		break;
+	default:
+		ptp_read_system_prets(sts);
+		stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+		ptp_read_system_postts(sts);
+		stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+		break;
+	}
+
+	ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
 	*ts = ns_to_timespec64(ns);
@@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 {
 	bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
 					     IXGBE_OVERFLOW_PERIOD);
-	struct timespec64 ts;
+	unsigned long flags;
 
 	if (timeout) {
-		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+		/* Update the timecounter */
+		spin_lock_irqsave(&adapter->tmreg_lock, flags);
+		timecounter_read(&adapter->hw_tc);
+		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
 		adapter->last_overflow_check = jiffies;
 	}
 }
@@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
 		adapter->ptp_caps.pps = 1;
 		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
 		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
-		adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+		adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
 		adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
 		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
 		adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
@@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
 		adapter->ptp_caps.pps = 0;
 		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
 		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
-		adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+		adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
 		adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
 		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
 		break;
@@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
 		adapter->ptp_caps.pps = 0;
 		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
 		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
-		adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+		adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
 		adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
 		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
 		adapter->ptp_setup_sdp = NULL;
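
Note: besides the gettimex64 conversion, the ixgbe overflow watchdog above now takes tmreg_lock and calls timecounter_read() directly; reading the timecounter folds the elapsed hardware cycles into its accumulated nanoseconds, which is all the periodic check needs to keep the cycle counter from wrapping unnoticed. A minimal sketch of that pattern with a hypothetical foo_clock:

#include <linux/spinlock.h>
#include <linux/timecounter.h>

struct foo_clock {
	struct timecounter tc;
	spinlock_t tmreg_lock;
};

/* Called periodically, well inside the wrap interval of the cycle counter. */
static void foo_overflow_check(struct foo_clock *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clk->tmreg_lock, flags);
	timecounter_read(&clk->tc);	/* updates tc->cycle_last and tc->nsec */
	spin_unlock_irqrestore(&clk->tmreg_lock, flags);
}
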
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e47ede..2de81f0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
-	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting == 1)
-		ixgbevf_set_itr(q_vector);
-	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-		ixgbevf_irq_enable_queues(adapter,
-					  BIT(q_vector->v_idx));
 
-	return 0;
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting == 1)
+			ixgbevf_set_itr(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter,
+						  BIT(q_vector->v_idx));
+	}
+
+	return min(work_done, budget - 1);
 }
 
 /**
@@ -4016,6 +4020,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 	/* set the timestamp */
 	first->time_stamp = jiffies;
 
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
 	 * memory model archs, such as IA-64).
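
Note: the igbvf, igc and ixgbevf poll routines above all converge on the same completion pattern: return the full budget while work remains, and only re-enable interrupts when napi_complete_done() returns true, since it returns false while the stack is busy-polling the queue. A generic sketch of that tail, with hypothetical foo_* helpers:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct foo_q_vector {
	struct napi_struct napi;
};

static int foo_clean_rx(struct foo_q_vector *q, int budget)
{
	return 0;	/* stands in for the RX cleanup loop */
}

static void foo_enable_irq(struct foo_q_vector *q)
{
	/* stands in for re-arming the queue interrupt */
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_q_vector *q = container_of(napi, struct foo_q_vector, napi);
	int work_done = foo_clean_rx(q, budget);

	/* Budget exhausted: stay in polling mode. */
	if (work_done == budget)
		return budget;

	/* napi_complete_done() returns false while busy polling is active;
	 * in that case leave interrupts off and let the stack keep polling.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		foo_enable_irq(q);

	return min(work_done, budget - 1);
}
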
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1e9bcbd..2f42727 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
 				   struct ethtool_link_ksettings *cmd)
 {
 	struct net_device *dev = mp->dev;
-	u32 supported, advertising;
 
 	phy_ethtool_ksettings_get(dev->phydev, cmd);
 
 	/*
 	 * The MAC does not support 1000baseT_Half.
 	 */
-	ethtool_convert_link_mode_to_legacy_u32(&supported,
-						cmd->link_modes.supported);
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
-	supported &= ~SUPPORTED_1000baseT_Half;
-	advertising &= ~ADVERTISED_1000baseT_Half;
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			   cmd->link_modes.supported);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			   cmd->link_modes.advertising);
 
 	return 0;
 }
@@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
 		phy->autoneg = AUTONEG_ENABLE;
 		phy->speed = 0;
 		phy->duplex = 0;
-		phy->advertising = phy->supported | ADVERTISED_Autoneg;
+		linkmode_copy(phy->advertising, phy->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 phy->advertising);
 	} else {
 		phy->autoneg = AUTONEG_DISABLE;
-		phy->advertising = 0;
+		linkmode_zero(phy->advertising);
 		phy->speed = speed;
 		phy->duplex = duplex;
 	}
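
Note: the mv643xx_eth hunks above drop the legacy u32 SUPPORTED_*/ADVERTISED_* round trip now that phylib keeps link modes in bitmaps; a device restriction is expressed by clearing the relevant ETHTOOL_LINK_MODE_* bit directly. A small sketch of the same idea against a hypothetical PHY:

#include <linux/linkmode.h>
#include <linux/phy.h>

/* The MAC in this example cannot do 1000baseT half duplex, so mask it
 * out of both what is supported and what gets advertised.
 */
static void foo_mask_1000half(struct phy_device *phydev)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   phydev->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   phydev->advertising);
}
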
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e5397c8..46a0f6b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4248,8 +4248,7 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
 
 	/* The Armada 37x documents do not give limits for this other than
 	 * it being an 8-bit register. */
-	if (eee->tx_lpi_enabled &&
-	    (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
 		return -EINVAL;
 
 	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 12db256..4c94571e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -31,6 +31,7 @@
  * @resp:		command response
  * @link_info:		link related information
  * @event_cb:		callback for linkchange events
+ * @event_cb_lock:	lock for serializing callback with unregister
  * @cmd_pend:		flag set before new command is started
  *			flag cleared after command response is received
  * @cgx:		parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
 	u64 resp;
 	struct cgx_link_user_info link_info;
 	struct cgx_event_cb event_cb;
+	spinlock_t event_cb_lock;
 	bool cmd_pend;
 	struct cgx *cgx;
 	u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
 	u8			cgx_id;
 	u8			lmac_count;
 	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
+	struct			work_struct cgx_cmd_work;
+	struct			workqueue_struct *cgx_cmd_workq;
 	struct list_head	cgx_list;
 };
 
@@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
 /* Convert firmware lmac type encoding to string */
 static char *cgx_lmactype_string[LMAC_MODE_MAX];
 
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
 /* Supported devices */
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 	return cgx->lmac_idmap[lmac_id];
 }
 
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
 {
 	struct cgx *cgx_dev;
-	int count = 0;
+	int idmax = -ENODEV;
 
 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
-		count++;
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
 
-	return count;
+	if (idmax < 0)
+		return 0;
+
+	return idmax + 1;
 }
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
 
 int cgx_get_lmac_cnt(void *cgxd)
 {
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
 	lmac->link_info = event.link_uinfo;
 	linfo = &lmac->link_info;
 
+	/* Ensure callback doesn't get unregistered until we finish it */
+	spin_lock(&lmac->event_cb_lock);
+
 	if (!lmac->event_cb.notify_link_chg) {
 		dev_dbg(dev, "cgx port %d:%d Link change handler null",
 			cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
 		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
 			 cgx->cgx_id, lmac->lmac_id,
 			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
-		return;
+		goto err;
 	}
 
 	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
 		dev_err(dev, "event notification failure\n");
+err:
+	spin_unlock(&lmac->event_cb_lock);
 }
 
 static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -548,6 +564,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
 }
 EXPORT_SYMBOL(cgx_lmac_evh_register);
 
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+	struct lmac *lmac;
+	unsigned long flags;
+	struct cgx *cgx = cgxd;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	spin_lock_irqsave(&lmac->event_cb_lock, flags);
+	lmac->event_cb.notify_link_chg = NULL;
+	lmac->event_cb.data = NULL;
+	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+	u64 req = 0;
+	u64 resp;
+
+	if (enable)
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+	else
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
 	u64 req = 0;
@@ -581,6 +629,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
 		return 0;
 }
 
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+	struct device *dev = &cgx->pdev->dev;
+	int i, err;
+
+	/* Do Link up for all the lmacs */
+	for (i = 0; i < cgx->lmac_count; i++) {
+		err = cgx_fwi_link_change(cgx, i, true);
+		if (err)
+			dev_info(dev, "cgx port %d:%d Link up command failed\n",
+				 cgx->cgx_id, i);
+	}
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx)
+		return -ENODEV;
+
+	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
 static int cgx_lmac_init(struct cgx *cgx)
 {
 	struct lmac *lmac;
@@ -602,6 +678,7 @@ static int cgx_lmac_init(struct cgx *cgx)
 		lmac->cgx = cgx;
 		init_waitqueue_head(&lmac->wq_cmd_cmplt);
 		mutex_init(&lmac->cmd_lock);
+		spin_lock_init(&lmac->event_cb_lock);
 		err = request_irq(pci_irq_vector(cgx->pdev,
 						 CGX_LMAC_FWI + i * 9),
 				   cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +701,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
 	struct lmac *lmac;
 	int i;
 
+	if (cgx->cgx_cmd_workq) {
+		flush_workqueue(cgx->cgx_cmd_workq);
+		destroy_workqueue(cgx->cgx_cmd_workq);
+		cgx->cgx_cmd_workq = NULL;
+	}
+
 	/* Free all lmac related resources */
 	for (i = 0; i < cgx->lmac_count; i++) {
 		lmac = cgx->lmac_idmap[i];
@@ -679,8 +762,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+		& CGX_ID_MASK;
+
+	/* init wq for processing linkup requests */
+	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	if (!cgx->cgx_cmd_workq) {
+		dev_err(dev, "alloc workqueue failed for cgx cmd");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
 	list_add(&cgx->cgx_list, &cgx_list);
-	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
 	cgx_link_usertable_init();
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0a66d27..8c2be84 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -20,40 +20,41 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define MAX_CGX				3
+#define CGX_ID_MASK			0x7
 #define MAX_LMAC_PER_CGX		4
+#define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
 
 /* Registers */
 #define CGXX_CMRX_CFG			0x00
-#define  CMR_EN					BIT_ULL(55)
-#define  DATA_PKT_TX_EN				BIT_ULL(53)
-#define  DATA_PKT_RX_EN				BIT_ULL(54)
-#define  CGX_LMAC_TYPE_SHIFT			40
-#define  CGX_LMAC_TYPE_MASK			0xF
+#define CMR_EN				BIT_ULL(55)
+#define DATA_PKT_TX_EN			BIT_ULL(53)
+#define DATA_PKT_RX_EN			BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT		40
+#define CGX_LMAC_TYPE_MASK		0xF
 #define CGXX_CMRX_INT			0x040
-#define  FW_CGX_INT				BIT_ULL(1)
+#define FW_CGX_INT			BIT_ULL(1)
 #define CGXX_CMRX_INT_ENA_W1S		0x058
 #define CGXX_CMRX_RX_ID_MAP		0x060
 #define CGXX_CMRX_RX_STAT0		0x070
 #define CGXX_CMRX_RX_LMACS		0x128
 #define CGXX_CMRX_RX_DMAC_CTL0		0x1F8
-#define  CGX_DMAC_CTL0_CAM_ENABLE		BIT_ULL(3)
-#define  CGX_DMAC_CAM_ACCEPT			BIT_ULL(3)
-#define  CGX_DMAC_MCAST_MODE			BIT_ULL(1)
-#define  CGX_DMAC_BCAST_MODE			BIT_ULL(0)
+#define CGX_DMAC_CTL0_CAM_ENABLE	BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT		BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE		BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE		BIT_ULL(0)
 #define CGXX_CMRX_RX_DMAC_CAM0		0x200
-#define  CGX_DMAC_CAM_ADDR_ENABLE		BIT_ULL(48)
+#define CGX_DMAC_CAM_ADDR_ENABLE	BIT_ULL(48)
 #define CGXX_CMRX_RX_DMAC_CAM1		0x400
-#define CGX_RX_DMAC_ADR_MASK			GENMASK_ULL(47, 0)
+#define CGX_RX_DMAC_ADR_MASK		GENMASK_ULL(47, 0)
 #define CGXX_CMRX_TX_STAT0		0x700
 #define CGXX_SCRATCH0_REG		0x1050
 #define CGXX_SCRATCH1_REG		0x1058
 #define CGX_CONST			0x2000
 #define CGXX_SPUX_CONTROL1		0x10000
-#define  CGXX_SPUX_CONTROL1_LBK			BIT_ULL(14)
+#define CGXX_SPUX_CONTROL1_LBK		BIT_ULL(14)
 #define CGXX_GMP_PCS_MRX_CTL		0x30000
-#define  CGXX_GMP_PCS_MRX_CTL_LBK		BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL_LBK	BIT_ULL(14)
 
 #define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
 #define CGX_EVENT_REG			CGXX_SCRATCH0_REG
@@ -94,11 +95,12 @@ struct cgx_event_cb {
 
 extern struct pci_driver cgx_driver;
 
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
 int cgx_get_lmac_cnt(void *cgxd);
 void *cgx_get_pdata(int cgx_id);
 int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
 int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
 int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
 int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
 int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -108,4 +110,5 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
 int cgx_get_link_info(void *cgxd, int lmac_id,
 		      struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
 #endif /* CGX_H */
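
Note: two mechanisms are added on the CGX side above: a per-LMAC event_cb_lock so cgx_lmac_evh_unregister() cannot race with a link-change notification already in flight, and a workqueue that issues the firmware LINK_BRING_UP commands from cgx_lmac_linkup_start() outside the caller's context. The callback/unregister serialization reduces to this pattern (hypothetical names):

#include <linux/spinlock.h>

struct foo_port {
	spinlock_t cb_lock;
	int (*notify)(void *data);
	void *data;
};

/* Runs from the firmware event path: deliver the event only if a
 * callback is still registered, under the same lock unregister takes.
 */
static void foo_deliver_event(struct foo_port *port)
{
	spin_lock(&port->cb_lock);
	if (port->notify)
		port->notify(port->data);
	spin_unlock(&port->cb_lock);
}

/* Once this returns, the callback can no longer be entered with the
 * old 'data' pointer.
 */
static void foo_unregister_notify(struct foo_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->cb_lock, flags);
	port->notify = NULL;
	port->data = NULL;
	spin_unlock_irqrestore(&port->cb_lock, flags);
}
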
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fa17af3..2d9fe51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,8 +78,6 @@ enum cgx_cmd_id {
 	CGX_CMD_LINK_STATE_CHANGE,
 	CGX_CMD_MODE_CHANGE,		/* hot plug support */
 	CGX_CMD_INTF_SHUTDOWN,
-	CGX_CMD_IRQ_ENABLE,
-	CGX_CMD_IRQ_DISABLE,
 };
 
 /* async event ids */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d39ada4..9bddb03 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,11 @@ enum nix_scheduler {
 	NIX_TXSCH_LVL_CNT = 0x5,
 };
 
+/* Min/Max packet sizes, excluding FCS */
+#define	NIC_HW_MIN_FRS			40
+#define	NIC_HW_MAX_FRS			9212
+#define	SDP_HW_MAX_FRS			65535
+
 /* NIX RX action operation*/
 #define NIX_RX_ACTIONOP_DROP		(0x0ull)
 #define NIX_RX_ACTIONOP_UCAST		(0x1ull)
@@ -169,7 +174,9 @@ enum nix_scheduler {
 
 #define MAX_LMAC_PKIND			12
 #define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a)			(12 + (a))
 #define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
 
 /* NIX LSO format indices.
  * As of now TSO is the only one using, so statically assigning indices.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 85ba24a..d6f9ed8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty);
 const char *otx2_mbox_id2name(u16 id)
 {
 	switch (id) {
-#define M(_name, _id, _1, _2) case _id: return # _name;
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
 	MBOX_MESSAGES
 #undef M
 	default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a15a59c..292c13a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -120,54 +120,93 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
 
 #define MBOX_MESSAGES							\
 /* Generic mbox IDs (range 0x000 - 0x1FF) */				\
-M(READY,		0x001, msg_req, ready_msg_rsp)			\
-M(ATTACH_RESOURCES,	0x002, rsrc_attach, msg_rsp)			\
-M(DETACH_RESOURCES,	0x003, rsrc_detach, msg_rsp)			\
-M(MSIX_OFFSET,		0x004, msg_req, msix_offset_rsp)		\
+M(READY,		0x001, ready, msg_req, ready_msg_rsp)		\
+M(ATTACH_RESOURCES,	0x002, attach_resources, rsrc_attach, msg_rsp)	\
+M(DETACH_RESOURCES,	0x003, detach_resources, rsrc_detach, msg_rsp)	\
+M(MSIX_OFFSET,		0x004, msix_offset, msg_req, msix_offset_rsp)	\
+M(VF_FLR,		0x006, vf_flr, msg_req, msg_rsp)		\
 /* CGX mbox IDs (range 0x200 - 0x3FF) */				\
-M(CGX_START_RXTX,	0x200, msg_req, msg_rsp)			\
-M(CGX_STOP_RXTX,	0x201, msg_req, msg_rsp)			\
-M(CGX_STATS,		0x202, msg_req, cgx_stats_rsp)			\
-M(CGX_MAC_ADDR_SET,	0x203, cgx_mac_addr_set_or_get,			\
+M(CGX_START_RXTX,	0x200, cgx_start_rxtx, msg_req, msg_rsp)	\
+M(CGX_STOP_RXTX,	0x201, cgx_stop_rxtx, msg_req, msg_rsp)		\
+M(CGX_STATS,		0x202, cgx_stats, msg_req, cgx_stats_rsp)	\
+M(CGX_MAC_ADDR_SET,	0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,    \
 				cgx_mac_addr_set_or_get)		\
-M(CGX_MAC_ADDR_GET,	0x204, cgx_mac_addr_set_or_get,			\
+M(CGX_MAC_ADDR_GET,	0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,    \
 				cgx_mac_addr_set_or_get)		\
-M(CGX_PROMISC_ENABLE,	0x205, msg_req, msg_rsp)			\
-M(CGX_PROMISC_DISABLE,	0x206, msg_req, msg_rsp)			\
-M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp)			\
-M(CGX_STOP_LINKEVENTS,	0x208, msg_req, msg_rsp)			\
-M(CGX_GET_LINKINFO,	0x209, msg_req, cgx_link_info_msg)		\
-M(CGX_INTLBK_ENABLE,	0x20A, msg_req, msg_rsp)			\
-M(CGX_INTLBK_DISABLE,	0x20B, msg_req, msg_rsp)			\
+M(CGX_PROMISC_ENABLE,	0x205, cgx_promisc_enable, msg_req, msg_rsp)	\
+M(CGX_PROMISC_DISABLE,	0x206, cgx_promisc_disable, msg_req, msg_rsp)	\
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp)	\
+M(CGX_STOP_LINKEVENTS,	0x208, cgx_stop_linkevents, msg_req, msg_rsp)	\
+M(CGX_GET_LINKINFO,	0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE,	0x20A, cgx_intlbk_enable, msg_req, msg_rsp)	\
+M(CGX_INTLBK_DISABLE,	0x20B, cgx_intlbk_disable, msg_req, msg_rsp)	\
 /* NPA mbox IDs (range 0x400 - 0x5FF) */				\
-M(NPA_LF_ALLOC,		0x400, npa_lf_alloc_req, npa_lf_alloc_rsp)	\
-M(NPA_LF_FREE,		0x401, msg_req, msg_rsp)			\
-M(NPA_AQ_ENQ,		0x402, npa_aq_enq_req, npa_aq_enq_rsp)		\
-M(NPA_HWCTX_DISABLE,	0x403, hwctx_disable_req, msg_rsp)		\
+M(NPA_LF_ALLOC,		0x400, npa_lf_alloc,				\
+				npa_lf_alloc_req, npa_lf_alloc_rsp)	\
+M(NPA_LF_FREE,		0x401, npa_lf_free, msg_req, msg_rsp)		\
+M(NPA_AQ_ENQ,		0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)   \
+M(NPA_HWCTX_DISABLE,	0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
 /* TIM mbox IDs (range 0x800 - 0x9FF) */				\
 /* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
 /* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
+M(NPC_MCAM_ALLOC_ENTRY,	0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+				npc_mcam_alloc_entry_rsp)		\
+M(NPC_MCAM_FREE_ENTRY,	0x6001, npc_mcam_free_entry,			\
+				 npc_mcam_free_entry_req, msg_rsp)	\
+M(NPC_MCAM_WRITE_ENTRY,	0x6002, npc_mcam_write_entry,			\
+				 npc_mcam_write_entry_req, msg_rsp)	\
+M(NPC_MCAM_ENA_ENTRY,   0x6003, npc_mcam_ena_entry,			\
+				 npc_mcam_ena_dis_entry_req, msg_rsp)	\
+M(NPC_MCAM_DIS_ENTRY,   0x6004, npc_mcam_dis_entry,			\
+				 npc_mcam_ena_dis_entry_req, msg_rsp)	\
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+				npc_mcam_shift_entry_rsp)		\
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter,		\
+					npc_mcam_alloc_counter_req,	\
+					npc_mcam_alloc_counter_rsp)	\
+M(NPC_MCAM_FREE_COUNTER,  0x6007, npc_mcam_free_counter,		\
+				    npc_mcam_oper_counter_req, msg_rsp)	\
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter,		\
+				   npc_mcam_unmap_counter_req, msg_rsp)	\
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter,		\
+				   npc_mcam_oper_counter_req, msg_rsp)	\
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats,		\
+				   npc_mcam_oper_counter_req,		\
+				   npc_mcam_oper_counter_rsp)		\
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry,      \
+					  npc_mcam_alloc_and_write_entry_req,  \
+					  npc_mcam_alloc_and_write_entry_rsp)  \
+M(NPC_GET_KEX_CFG,	  0x600c, npc_get_kex_cfg,			\
+				   msg_req, npc_get_kex_cfg_rsp)	\
 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
-M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp)	\
-M(NIX_LF_FREE,		0x8001, msg_req, msg_rsp)			\
-M(NIX_AQ_ENQ,		0x8002, nix_aq_enq_req, nix_aq_enq_rsp)		\
-M(NIX_HWCTX_DISABLE,	0x8003, hwctx_disable_req, msg_rsp)		\
-M(NIX_TXSCH_ALLOC,	0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free_req, msg_rsp)		\
-M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_config, msg_rsp)		\
-M(NIX_STATS_RST,	0x8007, msg_req, msg_rsp)			\
-M(NIX_VTAG_CFG,	0x8008, nix_vtag_config, msg_rsp)		\
-M(NIX_RSS_FLOWKEY_CFG,  0x8009, nix_rss_flowkey_cfg, msg_rsp)		\
-M(NIX_SET_MAC_ADDR,	0x800a, nix_set_mac_addr, msg_rsp)		\
-M(NIX_SET_RX_MODE,	0x800b, nix_rx_mode, msg_rsp)
+M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc,				\
+				 nix_lf_alloc_req, nix_lf_alloc_rsp)	\
+M(NIX_LF_FREE,		0x8001, nix_lf_free, msg_req, msg_rsp)		\
+M(NIX_AQ_ENQ,		0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp)  \
+M(NIX_HWCTX_DISABLE,	0x8003, nix_hwctx_disable,			\
+				 hwctx_disable_req, msg_rsp)		\
+M(NIX_TXSCH_ALLOC,	0x8004, nix_txsch_alloc,			\
+				 nix_txsch_alloc_req, nix_txsch_alloc_rsp)   \
+M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp)  \
+M(NIX_STATS_RST,	0x8007, nix_stats_rst, msg_req, msg_rsp)	\
+M(NIX_VTAG_CFG,		0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp)	\
+M(NIX_RSS_FLOWKEY_CFG,  0x8009, nix_rss_flowkey_cfg,			\
+				 nix_rss_flowkey_cfg, msg_rsp)		\
+M(NIX_SET_MAC_ADDR,	0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE,	0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)	\
+M(NIX_SET_HW_FRS,	0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)	\
+M(NIX_LF_START_RX,	0x800d, nix_lf_start_rx, msg_req, msg_rsp)	\
+M(NIX_LF_STOP_RX,	0x800e, nix_lf_stop_rx, msg_req, msg_rsp)	\
+M(NIX_RXVLAN_ALLOC,	0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES						\
-M(CGX_LINK_EVENT,		0xC00, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT,	0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
 
 enum {
-#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
 MBOX_MESSAGES
 MBOX_UP_CGX_MESSAGES
 #undef M
@@ -191,6 +230,13 @@ struct msg_rsp {
 	struct mbox_msghdr hdr;
 };
 
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+	RVU_INVALID_VF_ID           = -256,
+};
+
 struct ready_msg_rsp {
 	struct mbox_msghdr hdr;
 	u16    sclk_feq;	/* SCLK frequency */
@@ -347,6 +393,8 @@ struct hwctx_disable_req {
 	u8 ctype;
 };
 
+/* NIX mbox message formats */
+
 /* NIX mailbox error codes
  * Range 401 - 500.
  */
@@ -365,6 +413,8 @@ enum nix_af_status {
 	NIX_AF_INVAL_TXSCHQ_CFG     = -412,
 	NIX_AF_SMQ_FLUSH_FAILED     = -413,
 	NIX_AF_ERR_LF_RESET         = -414,
+	NIX_AF_INVAL_NPA_PF_FUNC    = -419,
+	NIX_AF_INVAL_SSO_PF_FUNC    = -420,
 };
 
 /* For NIX LF context alloc and init */
@@ -392,6 +442,10 @@ struct nix_lf_alloc_rsp {
 	u8	lso_tsov4_idx;
 	u8	lso_tsov6_idx;
 	u8      mac_addr[ETH_ALEN];
+	u8	lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+	u8	lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+	u16	cints; /* NIX_AF_CONST2::CINTS */
+	u16	qints; /* NIX_AF_CONST2::QINTS */
 };
 
 /* NIX AQ enqueue msg */
@@ -472,6 +526,7 @@ struct nix_txschq_config {
 
 struct nix_vtag_config {
 	struct mbox_msghdr hdr;
+	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
 	u8 vtag_size;
 	/* cfg_type is '0' for tx vlan cfg
 	 * cfg_type is '1' for rx vlan cfg
@@ -492,7 +547,7 @@ struct nix_vtag_config {
 
 		/* valid when cfg_type is '1' */
 		struct {
-			/* rx vtag type index */
+			/* rx vtag type index, valid values are in 0..7 range */
 			u8 vtag_type;
 			/* rx vtag strip */
 			u8 strip_vtag :1;
@@ -522,4 +577,159 @@ struct nix_rx_mode {
 	u16	mode;
 };
 
+struct nix_frs_cfg {
+	struct mbox_msghdr hdr;
+	u8	update_smq;    /* Update SMQ's min/max lens */
+	u8	update_minlen; /* Set minlen also */
+	u8	sdp_link;      /* Set SDP RX link */
+	u16	maxlen;
+	u16	minlen;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID	0xFFFF
+#define NPC_MCAM_INVALID_MAP	0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+	NPC_MCAM_INVALID_REQ	= -701,
+	NPC_MCAM_ALLOC_DENIED	= -702,
+	NPC_MCAM_ALLOC_FAILED	= -703,
+	NPC_MCAM_PERM_DENIED	= -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES	256
+	u8  contig;   /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO		0
+#define NPC_MCAM_LOWER_PRIO		1
+#define NPC_MCAM_HIGHER_PRIO		2
+	u8  priority; /* Lower or higher w.r.t ref_entry */
+	u16 ref_entry;
+	u16 count;    /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry allocated or start index if contiguous.
+		    * Invalid in case of non-contiguous.
+		    */
+	u16 count; /* Number of entries allocated */
+	u16 free_count; /* Number of entries available */
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry index to be freed */
+	u8  all;   /* If all entries allocated to this PFVF to be freed */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY	7 /* Number of keywords in max keywidth */
+	u64	kw[NPC_MAX_KWS_IN_KEY];
+	u64	kw_mask[NPC_MAX_KWS_IN_KEY];
+	u64	action;
+	u64	vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 entry;	 /* MCAM entry to write this match key */
+	u16 cntr;	 /* Counter for this MCAM entry */
+	u8  intf;	 /* Rx or Tx interface */
+	u8  enable_entry;/* Enable this MCAM entry ? */
+	u8  set_cntr;    /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS	64
+	u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+	struct mbox_msghdr hdr;
+	u8  contig;	/* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS       64
+	u16 count;	/* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+	struct mbox_msghdr hdr;
+	u16 cntr;   /* Counter allocated or start index if contiguous.
+		     * Invalid in case of non-contiguous.
+		     */
+	u16 count;  /* Number of counters allocated */
+	u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr;   /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+	struct mbox_msghdr hdr;
+	u64 stat;  /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr;
+	u16 entry; /* Entry and counter to be unmapped */
+	u8  all;   /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 ref_entry;
+	u8  priority;    /* Lower or higher w.r.t ref_entry */
+	u8  intf;	 /* Rx or Tx interface */
+	u8  enable_entry;/* Enable this MCAM entry ? */
+	u8  alloc_cntr;  /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry;
+	u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u64 rx_keyx_cfg;   /* NPC_AF_INTF(0)_KEX_CFG */
+	u64 tx_keyx_cfg;   /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF	2
+#define NPC_MAX_LID	8
+#define NPC_MAX_LT	16
+#define NPC_MAX_LD	2
+#define NPC_MAX_LFL	16
+	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+	u64 kex_ld_flags[NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+};
+
 #endif /* MBOX_H */
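
Note: the MBOX_MESSAGES table above gains a third field, a lowercase handler-name fragment, so rvu.c can generate the dispatch as rvu_mbox_handler_ ## _fn_name instead of uppercasing the message name. The table is an X-macro: the same list expands into the ID enum, the id-to-name lookup and the dispatch switch. A toy version of the technique, with made-up messages:

#include <linux/types.h>

/* One table, many expansions: each M() entry lists the message name, its
 * numeric ID, the handler-name fragment and (unused here) req/rsp types.
 */
#define FOO_MESSAGES					\
M(PING,		0x001, ping, foo_req, foo_rsp)		\
M(RESET,	0x002, reset, foo_req, foo_rsp)

enum {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) FOO_MSG_ ## _name = _id,
FOO_MESSAGES
#undef M
};

static const char *foo_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) case _id: return # _name;
	FOO_MESSAGES
#undef M
	default:
		return "INVALID";
	}
}
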
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f98b011..a7a20af 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -259,4 +259,10 @@ struct nix_rx_action {
 #endif
 };
 
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT		BIT_ULL(15)
+#define VTAG0_TYPE_MASK		GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK		GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK	GENMASK_ULL(7, 0)
+
 #endif /* NPC_H */
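
Note: the VTAG0_* masks above are plain GENMASK_ULL field definitions for the NIX receive vtag action word, so they compose naturally with FIELD_PREP/FIELD_GET. A short sketch of building such an action word (the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define VTAG0_VALID_BIT		BIT_ULL(15)
#define VTAG0_TYPE_MASK		GENMASK_ULL(14, 12)
#define VTAG0_LID_MASK		GENMASK_ULL(10, 8)
#define VTAG0_RELPTR_MASK	GENMASK_ULL(7, 0)

static inline u64 foo_mk_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	return VTAG0_VALID_BIT |
	       FIELD_PREP(VTAG0_TYPE_MASK, type) |
	       FIELD_PREP(VTAG0_LID_MASK, lid) |
	       FIELD_PREP(VTAG0_RELPTR_MASK, relptr);
}
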
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index dc28fa2..4d061d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 				struct rvu_block *block, int lf);
 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 				  struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *));
+enum {
+	TYPE_AFVF,
+	TYPE_AFPF,
+};
 
 /* Supported devices */
 static const struct pci_device_id rvu_id_table[] = {
@@ -153,17 +163,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
 	u16 match = 0;
 	int lf;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lf = 0; lf < block->lf.max; lf++) {
 		if (block->fn_map[lf] == pcifunc) {
 			if (slot == match) {
-				spin_unlock(&rvu->rsrc_lock);
+				mutex_unlock(&rvu->rsrc_lock);
 				return lf;
 			}
 			match++;
 		}
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return -ENODEV;
 }
 
@@ -337,6 +347,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
 		return &rvu->pf[rvu_get_pf(pcifunc)];
 }
 
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+	int pf, vf, nvfs;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	if (pf >= rvu->hw->total_pfs)
+		return false;
+
+	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+		return true;
+
+	/* Check if VF is within number of VFs attached to this PF */
+	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	nvfs = (cfg >> 12) & 0xFF;
+	if (vf >= nvfs)
+		return false;
+
+	return true;
+}
+
 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
 {
 	struct rvu_block *block;
@@ -597,6 +629,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
 	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
 			   max_msix * PCI_MSIX_ENTRY_SIZE,
 			   DMA_BIDIRECTIONAL, 0);
+
+	mutex_destroy(&rvu->rsrc_lock);
 }
 
 static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +786,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 	if (!rvu->hwvf)
 		return -ENOMEM;
 
-	spin_lock_init(&rvu->rsrc_lock);
+	mutex_init(&rvu->rsrc_lock);
 
 	err = rvu_setup_msix_resources(rvu);
 	if (err)
@@ -777,17 +811,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 
 	err = rvu_npc_init(rvu);
 	if (err)
-		return err;
+		goto exit;
+
+	err = rvu_cgx_init(rvu);
+	if (err)
+		goto exit;
 
 	err = rvu_npa_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	err = rvu_nix_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	return 0;
+
+cgx_err:
+	rvu_cgx_exit(rvu);
+exit:
+	return err;
 }
 
 /* NPA and NIX admin queue APIs */
@@ -830,7 +873,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
 	return 0;
 }
 
-static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
 				  struct ready_msg_rsp *rsp)
 {
 	return 0;
@@ -858,6 +901,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
 	return 0;
 }
 
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+	struct rvu_pfvf *pfvf;
+
+	if (!is_pf_func_valid(rvu, pcifunc))
+		return false;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Check if this PFFUNC has a LF of type blktype attached */
+	if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+		return false;
+
+	return true;
+}
+
 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
 			   int pcifunc, int slot)
 {
@@ -926,7 +985,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 	struct rvu_block *block;
 	int blkid;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check for partial resource detach */
 	if (detach && detach->partial)
@@ -956,11 +1015,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 		rvu_detach_block(rvu, pcifunc, block->type);
 	}
 
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return 0;
 }
 
-static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
 					     struct rsrc_detach *detach,
 					     struct msg_rsp *rsp)
 {
@@ -1108,7 +1167,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
 	return -ENOSPC;
 }
 
-static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
 					     struct rsrc_attach *attach,
 					     struct msg_rsp *rsp)
 {
@@ -1119,7 +1178,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
 	if (!attach->modify)
 		rvu_detach_rsrcs(rvu, NULL, pcifunc);
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check if the request can be accommodated */
 	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1222,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
 	}
 
 exit:
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return err;
 }
 
@@ -1231,7 +1290,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
 }
 
-static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
 					struct msix_offset_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -1280,22 +1339,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
 	return 0;
 }
 
-static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 vf, numvfs;
+	u64 cfg;
+
+	vf = pcifunc & RVU_PFVF_FUNC_MASK;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM,
+			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+	numvfs = (cfg >> 12) & 0xFF;
+
+	if (vf && vf <= numvfs)
+		__rvu_flr_handler(rvu, pcifunc);
+	else
+		return RVU_INVALID_VF_ID;
+
+	return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
 				struct mbox_msghdr *req)
 {
+	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
 	/* Check if valid, if not reply with a invalid msg */
 	if (req->sig != OTX2_MBOX_REQ_SIG)
 		goto bad_message;
 
 	switch (req->id) {
-#define M(_name, _id, _req_type, _rsp_type)				\
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
 	case _id: {							\
 		struct _rsp_type *rsp;					\
 		int err;						\
 									\
 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
-			&rvu->mbox, devid,				\
+			mbox, devid,					\
 			sizeof(struct _rsp_type));			\
+		/* some handlers should complete even if reply */	\
+		/* could not be allocated */				\
+		if (!rsp &&						\
+		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
+		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
+		    _id != MBOX_MSG_VF_FLR)				\
+			return -ENOMEM;					\
 		if (rsp) {						\
 			rsp->hdr.id = _id;				\
 			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
@@ -1303,9 +1391,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
 			rsp->hdr.rc = 0;				\
 		}							\
 									\
-		err = rvu_mbox_handler_ ## _name(rvu,			\
-						 (struct _req_type *)req, \
-						 rsp);			\
+		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
+						    (struct _req_type *)req, \
+						    rsp);		\
 		if (rsp && err)						\
 			rsp->hdr.rc = err;				\
 									\
@@ -1313,29 +1401,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
 	}
 MBOX_MESSAGES
 #undef M
-		break;
+
 bad_message:
 	default:
-		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
-				       req->id);
+		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
 		return -ENODEV;
 	}
 }
 
-static void rvu_mbox_handler(struct work_struct *work)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
 {
-	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
 	struct rvu *rvu = mwork->rvu;
+	int offset, err, id, devid;
 	struct otx2_mbox_dev *mdev;
 	struct mbox_hdr *req_hdr;
 	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
 	struct otx2_mbox *mbox;
-	int offset, id, err;
-	u16 pf;
 
-	mbox = &rvu->mbox;
-	pf = mwork - rvu->mbox_wrk;
-	mdev = &mbox->dev[pf];
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
+
+	devid = mwork - mw->mbox_wrk;
+	mbox = &mw->mbox;
+	mdev = &mbox->dev[devid];
 
 	/* Process received mbox messages */
 	req_hdr = mdev->mbase + mbox->rx_start;
@@ -1347,10 +1444,21 @@ static void rvu_mbox_handler(struct work_struct *work)
 	for (id = 0; id < req_hdr->num_msgs; id++) {
 		msg = mdev->mbase + offset;
 
-		/* Set which PF sent this message based on mbox IRQ */
-		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
-		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
-		err = rvu_process_mbox_msg(rvu, pf, msg);
+		/* Set which PF/VF sent this message based on mbox IRQ */
+		switch (type) {
+		case TYPE_AFPF:
+			msg->pcifunc &=
+				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+			break;
+		case TYPE_AFVF:
+			msg->pcifunc &=
+				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+			break;
+		}
+
+		err = rvu_process_mbox_msg(mbox, devid, msg);
 		if (!err) {
 			offset = mbox->rx_start + msg->next_msgoff;
 			continue;
@@ -1358,31 +1466,57 @@ static void rvu_mbox_handler(struct work_struct *work)
 
 		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
-				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid,
 				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
 		else
 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
-				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid);
 	}
 
-	/* Send mbox responses to PF */
-	otx2_mbox_msg_send(mbox, pf);
+	/* Send mbox responses to VF/PF */
+	otx2_mbox_msg_send(mbox, devid);
 }
 
-static void rvu_mbox_up_handler(struct work_struct *work)
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
 {
 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
 	struct rvu *rvu = mwork->rvu;
 	struct otx2_mbox_dev *mdev;
 	struct mbox_hdr *rsp_hdr;
 	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
 	struct otx2_mbox *mbox;
-	int offset, id;
-	u16 pf;
+	int offset, id, devid;
 
-	mbox = &rvu->mbox_up;
-	pf = mwork - rvu->mbox_wrk_up;
-	mdev = &mbox->dev[pf];
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
+
+	devid = mwork - mw->mbox_wrk_up;
+	mbox = &mw->mbox_up;
+	mdev = &mbox->dev[devid];
 
 	rsp_hdr = mdev->mbase + mbox->rx_start;
 	if (rsp_hdr->num_msgs == 0) {
@@ -1423,128 +1557,182 @@ static void rvu_mbox_up_handler(struct work_struct *work)
 		mdev->msgs_acked++;
 	}
 
-	otx2_mbox_reset(mbox, 0);
+	otx2_mbox_reset(mbox, devid);
 }
 
-static int rvu_mbox_init(struct rvu *rvu)
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
 {
-	struct rvu_hwinfo *hw = rvu->hw;
-	void __iomem *hwbase = NULL;
-	struct rvu_work *mwork;
-	u64 bar4_addr;
-	int err, pf;
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
 
-	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
-				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-				       hw->total_pfs);
-	if (!rvu->mbox_wq)
+	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *))
+{
+	void __iomem *hwbase = NULL, *reg_base;
+	int err, i, dir, dir_up;
+	struct rvu_work *mwork;
+	const char *name;
+	u64 bar4_addr;
+
+	switch (type) {
+	case TYPE_AFPF:
+		name = "rvu_afpf_mailbox";
+		bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+		dir = MBOX_DIR_AFPF;
+		dir_up = MBOX_DIR_AFPF_UP;
+		reg_base = rvu->afreg_base;
+		break;
+	case TYPE_AFVF:
+		name = "rvu_afvf_mailbox";
+		bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+		dir = MBOX_DIR_PFVF;
+		dir_up = MBOX_DIR_PFVF_UP;
+		reg_base = rvu->pfreg_base;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mw->mbox_wq = alloc_workqueue(name,
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				      num);
+	if (!mw->mbox_wq)
 		return -ENOMEM;
 
-	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
-				     sizeof(struct rvu_work), GFP_KERNEL);
-	if (!rvu->mbox_wrk) {
+	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
-					sizeof(struct rvu_work), GFP_KERNEL);
-	if (!rvu->mbox_wrk_up) {
+	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+				       sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk_up) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	/* Map mbox region shared with PFs */
-	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
 	/* Mailbox is a reserved memory (in RAM) region shared between
 	 * RVU devices, shouldn't be mapped as device memory to allow
 	 * unaligned accesses.
 	 */
-	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
 	if (!hwbase) {
 		dev_err(rvu->dev, "Unable to map mailbox region\n");
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
-			     MBOX_DIR_AFPF, hw->total_pfs);
+	err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
 	if (err)
 		goto exit;
 
-	err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
-			     MBOX_DIR_AFPF_UP, hw->total_pfs);
+	err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+			     reg_base, dir_up, num);
 	if (err)
 		goto exit;
 
-	for (pf = 0; pf < hw->total_pfs; pf++) {
-		mwork = &rvu->mbox_wrk[pf];
+	for (i = 0; i < num; i++) {
+		mwork = &mw->mbox_wrk[i];
 		mwork->rvu = rvu;
-		INIT_WORK(&mwork->work, rvu_mbox_handler);
-	}
+		INIT_WORK(&mwork->work, mbox_handler);
 
-	for (pf = 0; pf < hw->total_pfs; pf++) {
-		mwork = &rvu->mbox_wrk_up[pf];
+		mwork = &mw->mbox_wrk_up[i];
 		mwork->rvu = rvu;
-		INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+		INIT_WORK(&mwork->work, mbox_up_handler);
 	}
 
 	return 0;
 exit:
 	if (hwbase)
 		iounmap((void __iomem *)hwbase);
-	destroy_workqueue(rvu->mbox_wq);
+	destroy_workqueue(mw->mbox_wq);
 	return err;
 }
 
-static void rvu_mbox_destroy(struct rvu *rvu)
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
 {
-	if (rvu->mbox_wq) {
-		flush_workqueue(rvu->mbox_wq);
-		destroy_workqueue(rvu->mbox_wq);
-		rvu->mbox_wq = NULL;
+	if (mw->mbox_wq) {
+		flush_workqueue(mw->mbox_wq);
+		destroy_workqueue(mw->mbox_wq);
+		mw->mbox_wq = NULL;
 	}
 
-	if (rvu->mbox.hwbase)
-		iounmap((void __iomem *)rvu->mbox.hwbase);
+	if (mw->mbox.hwbase)
+		iounmap((void __iomem *)mw->mbox.hwbase);
 
-	otx2_mbox_destroy(&rvu->mbox);
-	otx2_mbox_destroy(&rvu->mbox_up);
+	otx2_mbox_destroy(&mw->mbox);
+	otx2_mbox_destroy(&mw->mbox_up);
+}
+
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+			   int mdevs, u64 intr)
+{
+	struct otx2_mbox_dev *mdev;
+	struct otx2_mbox *mbox;
+	struct mbox_hdr *hdr;
+	int i;
+
+	for (i = first; i < mdevs; i++) {
+		/* Interrupt bits are indexed from 0 for this device range */
+		if (!(intr & BIT_ULL(i - first)))
+			continue;
+
+		mbox = &mw->mbox;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+		mbox = &mw->mbox_up;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+	}
 }
 
 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
 {
 	struct rvu *rvu = (struct rvu *)rvu_irq;
-	struct otx2_mbox_dev *mdev;
-	struct otx2_mbox *mbox;
-	struct mbox_hdr *hdr;
+	int vfs = rvu->vfs;
 	u64 intr;
-	u8  pf;
 
 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
 	/* Clear interrupts */
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
 
 	/* Sync with mbox memory region */
-	smp_wmb();
+	rmb();
 
-	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
-		if (intr & (1ULL << pf)) {
-			mbox = &rvu->mbox;
-			mdev = &mbox->dev[pf];
-			hdr = mdev->mbase + mbox->rx_start;
-			if (hdr->num_msgs)
-				queue_work(rvu->mbox_wq,
-					   &rvu->mbox_wrk[pf].work);
-			mbox = &rvu->mbox_up;
-			mdev = &mbox->dev[pf];
-			hdr = mdev->mbase + mbox->rx_start;
-			if (hdr->num_msgs)
-				queue_work(rvu->mbox_wq,
-					   &rvu->mbox_wrk_up[pf].work);
-		}
+	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+	/* Handle VF interrupts */
+	if (vfs > 64) {
+		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+		vfs -= 64;
 	}
 
+	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
 	return IRQ_HANDLED;
 }
 
@@ -1561,6 +1749,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
 		    INTR_MASK(hw->total_pfs) & ~1ULL);
 }
 
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+	struct rvu_block *block;
+	int slot, lf, num_lfs;
+	int err;
+
+	block = &rvu->hw->block[blkaddr];
+	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+					block->type);
+	if (!num_lfs)
+		return;
+	for (slot = 0; slot < num_lfs; slot++) {
+		lf = rvu_get_lf(rvu, block, pcifunc, slot);
+		if (lf < 0)
+			continue;
+
+		/* Cleanup LF and reset it */
+		if (block->addr == BLKADDR_NIX0)
+			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+		else if (block->addr == BLKADDR_NPA)
+			rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+		err = rvu_lf_reset(rvu, block, lf);
+		if (err) {
+			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+				block->addr, lf);
+		}
+	}
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+	mutex_lock(&rvu->flr_lock);
+	/* Reset order should reflect inter-block dependencies:
+	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
+	 * 2. Flush and reset SSO/SSOW
+	 * 3. Cleanup pools (NPA)
+	 */
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+	rvu_detach_rsrcs(rvu, NULL, pcifunc);
+	mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+	int reg = 0;
+
+	/* pcifunc = 0(PF0) | (vf + 1) */
+	__rvu_flr_handler(rvu, vf + 1);
+
+	if (vf >= 64) {
+		reg = 1;
+		vf = vf - 64;
+	}
+
+	/* Signal FLR finish and enable IRQ */
+	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = flrwork->rvu;
+	u16 pcifunc, numvfs, vf;
+	u64 cfg;
+	int pf;
+
+	pf = flrwork - rvu->flr_wrk;
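+	/* Work entries beyond the PF range belong to AF's own VFs */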
+	if (pf >= rvu->hw->total_pfs) {
+		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+		return;
+	}
+
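+	/* Tear down all VFs of this PF, then the PF itself */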
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	numvfs = (cfg >> 12) & 0xFF;
+	pcifunc  = pf << RVU_PFVF_PF_SHIFT;
+
+	for (vf = 0; vf < numvfs; vf++)
+		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+	__rvu_flr_handler(rvu, pcifunc);
+
+	/* Signal FLR finish */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+	/* Enable interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+	int dev, vf, reg = 0;
+	u64 intr;
+
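+	/* VFs 64-127 are tracked in the second FLR interrupt register */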
+	if (start_vf >= 64)
+		reg = 1;
+
+	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+	if (!intr)
+		return;
+
+	for (vf = 0; vf < numvfs; vf++) {
+		if (!(intr & BIT_ULL(vf)))
+			continue;
+		dev = vf + start_vf + rvu->hw->total_pfs;
+		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+		/* Clear and disable the interrupt */
+		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+	}
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8  pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+	if (!intr)
+		goto afvf_flr;
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* PF is already dead, so do only AF-side cleanup */
+			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+				    BIT_ULL(pf));
+			/* Disable the interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+				    BIT_ULL(pf));
+		}
+	}
+
+afvf_flr:
+	rvu_afvf_queue_flr_work(rvu, 0, 64);
+	if (rvu->vfs > 64)
+		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+	int vf;
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (vf = 0; vf < 64; vf++) {
+		if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME(master enable) */
+			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+			/* clear interrupt */
+			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+		}
+	}
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int vfset;
+	u64 intr;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	for (vfset = 0; vfset <= 1; vfset++) {
+		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+		if (intr)
+			rvu_me_handle_vfset(rvu, vfset, intr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8  pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME(master enable) */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+				    BIT_ULL(pf));
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+				    BIT_ULL(pf));
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 static void rvu_unregister_interrupts(struct rvu *rvu)
 {
 	int irq;
@@ -1569,6 +1967,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
+	/* Disable the PF FLR interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Disable the PF ME interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
 	for (irq = 0; irq < rvu->num_vec; irq++) {
 		if (rvu->irq_allocated[irq])
 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1578,9 +1984,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 	rvu->num_vec = 0;
 }
 
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+	struct rvu_pfvf *pfvf = &rvu->pf[0];
+	int offset;
+
+	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. Offset equal to zero means
+	 * that PF vectors are not configured and would overlap the AF vectors.
+	 */
+	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+	       offset;
+}
+
 static int rvu_register_interrupts(struct rvu *rvu)
 {
-	int ret;
+	int ret, offset, pf_vec_start;
 
 	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
 
@@ -1620,13 +2042,323 @@ static int rvu_register_interrupts(struct rvu *rvu)
 	/* Enable mailbox interrupts from all PFs */
 	rvu_enable_mbox_intr(rvu);
 
+	/* Register FLR interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+		"RVUAF FLR");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for FLR\n");
+		goto fail;
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Register ME interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+		"RVUAF ME");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+			  rvu_me_pf_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for ME\n");
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+	/* Enable ME interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu))
+		return 0;
+
+	/* Get PF MSIX vectors offset. */
+	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Register MBOX0 interrupt. */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox0\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register MBOX1 interrupt. The MBOX1 IRQ vector immediately
+	 * follows MBOX0 within the PF vector range.
+	 */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox1\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register FLR interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	/* Register ME interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
 	return 0;
 
 fail:
-	pci_free_irq_vectors(rvu->pdev);
+	rvu_unregister_interrupts(rvu);
 	return ret;
 }
 
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->flr_wq) {
+		flush_workqueue(rvu->flr_wq);
+		destroy_workqueue(rvu->flr_wq);
+		rvu->flr_wq = NULL;
+	}
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+	int dev, num_devs;
+	u64 cfg;
+	int pf;
+
+	/* Enable FLR for all PFs */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+			    cfg | BIT_ULL(22));
+	}
+
+	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				       1);
+	if (!rvu->flr_wq)
+		return -ENOMEM;
+
+	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->flr_wrk) {
+		destroy_workqueue(rvu->flr_wq);
+		return -ENOMEM;
+	}
+
+	for (dev = 0; dev < num_devs; dev++) {
+		rvu->flr_wrk[dev].rvu = rvu;
+		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+	}
+
+	mutex_init(&rvu->flr_lock);
+
+	return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+		      INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	/* Clear any pending interrupts and enable AF VF interrupts for
+	 * the first 64 VFs.
+	 */
+	/* Mbox */
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* FLR */
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* Same for remaining VFs, if any. */
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+		      INTR_MASK(vfs - 64));
+
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
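+/* PCI device ID of the OcteonTX2 loopback (LBK) block */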
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+	struct pci_dev *pdev;
+	void __iomem *base;
+	int ret = -EIO;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+			      NULL);
+	if (!pdev)
+		goto err;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (!base)
+		goto err_put;
+
+	/* Read number of available LBK channels from LBK(0)_CONST register. */
+	ret = (readq(base + 0x10) >> 32) & 0xffff;
+	iounmap(base);
+err_put:
+	pci_dev_put(pdev);
+err:
+	return ret;
+}
+
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+	int err, chans, vfs;
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+		dev_warn(&pdev->dev,
+			 "Skipping SRIOV enablement since not enough IRQs are available\n");
+		return 0;
+	}
+
+	chans = lbk_get_num_chans();
+	if (chans < 0)
+		return chans;
+
+	vfs = pci_sriov_get_totalvfs(pdev);
+
+	/* Limit VFs in case we have more VFs than LBK channels available. */
+	if (vfs > chans)
+		vfs = chans;
+
+	/* AF's VFs work in pairs and talk over consecutive loopback channels.
+	 * Thus we want to enable the maximum even number of VFs. If an
+	 * odd number of VFs is available, the last VF on the list
+	 * remains disabled.
+	 */
+	if (vfs & 0x1) {
+		dev_warn(&pdev->dev,
+			 "Number of VFs should be even. Enabling %d out of %d.\n",
+			 vfs - 1, vfs);
+		vfs--;
+	}
+
+	if (!vfs)
+		return 0;
+
+	/* Save the number of VFs for reference in the VF interrupt handlers.
+	 * Since interrupts might start arriving during SRIOV enablement,
+	 * the ordinary API cannot be used to get the number of enabled VFs.
+	 */
+	rvu->vfs = vfs;
+
+	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+	if (err)
+		return err;
+
+	rvu_enable_afvf_intr(rvu);
+	/* Make sure IRQs are enabled before SRIOV. */
+	mb();
+
+	err = pci_enable_sriov(pdev, vfs);
+	if (err) {
+		rvu_disable_afvf_intr(rvu);
+		rvu_mbox_destroy(&rvu->afvf_wq_info);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+	rvu_disable_afvf_intr(rvu);
+	rvu_mbox_destroy(&rvu->afvf_wq_info);
+	pci_disable_sriov(rvu->pdev);
+}
+
 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -1689,24 +2421,35 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_release_regions;
 
-	err = rvu_mbox_init(rvu);
+	/* Init mailbox between AF and PFs */
+	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+			    rvu_afpf_mbox_up_handler);
 	if (err)
 		goto err_hwsetup;
 
-	err = rvu_cgx_probe(rvu);
+	err = rvu_flr_init(rvu);
 	if (err)
 		goto err_mbox;
 
 	err = rvu_register_interrupts(rvu);
 	if (err)
-		goto err_cgx;
+		goto err_flr;
+
+	/* Enable AF's VFs (if any) */
+	err = rvu_enable_sriov(rvu);
+	if (err)
+		goto err_irq;
 
 	return 0;
-err_cgx:
-	rvu_cgx_wq_destroy(rvu);
+err_irq:
+	rvu_unregister_interrupts(rvu);
+err_flr:
+	rvu_flr_wq_destroy(rvu);
 err_mbox:
-	rvu_mbox_destroy(rvu);
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
 err_hwsetup:
+	rvu_cgx_exit(rvu);
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
 err_release_regions:
@@ -1725,8 +2468,10 @@ static void rvu_remove(struct pci_dev *pdev)
 	struct rvu *rvu = pci_get_drvdata(pdev);
 
 	rvu_unregister_interrupts(rvu);
-	rvu_cgx_wq_destroy(rvu);
-	rvu_mbox_destroy(rvu);
+	rvu_flr_wq_destroy(rvu);
+	rvu_cgx_exit(rvu);
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
+	rvu_disable_sriov(rvu);
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 2c0580c..ae8e2e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -11,6 +11,7 @@
 #ifndef RVU_H
 #define RVU_H
 
+#include <linux/pci.h>
 #include "rvu_struct.h"
 #include "common.h"
 #include "mbox.h"
@@ -18,6 +19,9 @@
 /* PCI device IDs */
 #define	PCI_DEVID_OCTEONTX2_RVU_AF		0xA065
 
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX                  0xB200
+
 /* PCI BAR nos */
 #define	PCI_AF_REG_BAR_NUM			0
 #define	PCI_PF_REG_BAR_NUM			2
@@ -64,7 +68,7 @@ struct nix_mcast {
 	struct qmem	*mcast_buf;
 	int		replay_pkind;
 	int		next_free_mce;
-	spinlock_t	mce_lock; /* Serialize MCE updates */
+	struct mutex	mce_lock; /* Serialize MCE updates */
 };
 
 struct nix_mce_list {
@@ -74,15 +78,27 @@ struct nix_mce_list {
 };
 
 struct npc_mcam {
-	spinlock_t	lock;	/* MCAM entries and counters update lock */
+	struct rsrc_bmap counters;
+	struct mutex	lock;	/* MCAM entries and counters update lock */
+	unsigned long	*bmap;		/* bitmap, 0 => bmap_entries */
+	unsigned long	*bmap_reverse;	/* Reverse bitmap, bmap_entries => 0 */
+	u16	bmap_entries;	/* Number of unreserved MCAM entries */
+	u16	bmap_fcnt;	/* MCAM entries free count */
+	u16	*entry2pfvf_map;
+	u16	*entry2cntr_map;
+	u16	*cntr2pfvf_map;
+	u16	*cntr_refcnt;
 	u8	keysize;	/* MCAM keysize 112/224/448 bits */
 	u8	banks;		/* Number of MCAM banks */
 	u8	banks_per_entry;/* Number of keywords in key */
 	u16	banksize;	/* Number of MCAM entries in each bank */
 	u16	total_entries;	/* Total number of MCAM entries */
-	u16     entries;	/* Total minus reserved for NIX LFs */
 	u16	nixlf_offset;	/* Offset of nixlf rsvd uncast entries */
 	u16	pf_offset;	/* Offset of PF's rsvd bcast, promisc entries */
+	u16	lprio_count;
+	u16	lprio_start;
+	u16	hprio_count;
+	u16	hprio_end;
 };
 
 /* Structure for per RVU func info ie PF/VF */
@@ -122,12 +138,19 @@ struct rvu_pfvf {
 	u16		tx_chan_base;
 	u8              rx_chan_cnt; /* total number of RX channels */
 	u8              tx_chan_cnt; /* total number of TX channels */
+	u16		maxlen;
+	u16		minlen;
 
 	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
 
 	/* Broadcast pkt replication info */
 	u16			bcast_mce_idx;
 	struct nix_mce_list	bcast_mce_list;
+
+	/* VLAN offload */
+	struct mcam_entry entry;
+	int rxvlan_index;
+	bool rxvlan;
 };
 
 struct nix_txsch {
@@ -164,6 +187,16 @@ struct rvu_hwinfo {
 	struct npc_mcam  mcam;
 };
 
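+/* Mailbox and workqueue state for one mailbox direction (AF<->PF or AF<->VF) */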
+struct mbox_wq_info {
+	struct otx2_mbox mbox;
+	struct rvu_work *mbox_wrk;
+
+	struct otx2_mbox mbox_up;
+	struct rvu_work *mbox_wrk_up;
+
+	struct workqueue_struct *mbox_wq;
+};
+
 struct rvu {
 	void __iomem		*afreg_base;
 	void __iomem		*pfreg_base;
@@ -172,14 +205,17 @@ struct rvu {
 	struct rvu_hwinfo       *hw;
 	struct rvu_pfvf		*pf;
 	struct rvu_pfvf		*hwvf;
-	spinlock_t		rsrc_lock; /* Serialize resource alloc/free */
+	struct mutex		rsrc_lock; /* Serialize resource alloc/free */
+	int			vfs; /* Number of VFs attached to RVU */
 
 	/* Mbox */
-	struct otx2_mbox	mbox;
-	struct rvu_work		*mbox_wrk;
-	struct otx2_mbox        mbox_up;
-	struct rvu_work		*mbox_wrk_up;
-	struct workqueue_struct *mbox_wq;
+	struct mbox_wq_info	afpf_wq_info;
+	struct mbox_wq_info	afvf_wq_info;
+
+	/* PF FLR */
+	struct rvu_work		*flr_wrk;
+	struct workqueue_struct *flr_wq;
+	struct mutex		flr_lock; /* Serialize FLRs */
 
 	/* MSI-X */
 	u16			num_vec;
@@ -190,7 +226,7 @@ struct rvu {
 	/* CGX */
 #define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
 	u8			cgx_mapped_pfs;
-	u8			cgx_cnt; /* available cgx ports */
+	u8			cgx_cnt_max;	 /* CGX port count max */
 	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
 	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
 						  * every cgx lmac port
@@ -223,9 +259,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
 	return readq(rvu->pfreg_base + offset);
 }
 
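+/* Identify 96xx A0 silicon, which needs hardware errata workarounds */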
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+
+	return (pdev->revision == 0x00) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
 /* Function Prototypes
  * RVU
  */
+static inline int is_afvf(u16 pcifunc)
+{
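+	/* PF0 is the AF; only FUNC bits are set for the AF and its VFs */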
+	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -236,6 +285,7 @@ int rvu_get_pf(u16 pcifunc);
 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
@@ -266,89 +316,100 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
 	*lmac_id = (map & 0xF);
 }
 
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
 				    struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
 				   struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
 			       struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
 					  struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
 				      struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp);
 
 /* NPA APIs */
 int rvu_npa_init(struct rvu *rvu);
 void rvu_npa_freemem(struct rvu *rvu);
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 				  struct npa_lf_alloc_req *req,
 				  struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp);
 
 /* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
 int rvu_nix_init(struct rvu *rvu);
 void rvu_nix_freemem(struct rvu *rvu);
 int rvu_get_nixlf_count(struct rvu *rvu);
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
 				  struct nix_lf_alloc_req *req,
 				  struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
 				struct nix_aq_enq_req *req,
 				struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 				     struct nix_txsch_alloc_req *req,
 				     struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
 				    struct nix_txsch_free_req *req,
 				    struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 				    struct nix_txschq_config *req,
 				    struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
 				   struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
 				  struct nix_vtag_config *req,
 				  struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+				      struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
 					 struct nix_rss_flowkey_cfg *req,
 					 struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
 				      struct nix_set_mac_addr *req,
 				      struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
 				     struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+				     struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp);
 
 /* NPC APIs */
 int rvu_npc_init(struct rvu *rvu);
@@ -360,9 +421,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 				   int nixlf, u64 chan, bool allmulti);
 void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 				       int nixlf, u64 chan);
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
 				    int group, int alg_idx, int mcam_index);
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+					  struct npc_mcam_alloc_entry_req *req,
+					  struct npc_mcam_alloc_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+					 struct npc_mcam_free_entry_req *req,
+					 struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+					  struct npc_mcam_write_entry_req *req,
+					  struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+					  struct npc_mcam_shift_entry_req *req,
+					  struct npc_mcam_shift_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+				struct npc_mcam_alloc_counter_req *req,
+				struct npc_mcam_alloc_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+		   struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+			struct npc_mcam_oper_counter_req *req,
+			struct npc_mcam_oper_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+			  struct npc_mcam_alloc_and_write_entry_req *req,
+			  struct npc_mcam_alloc_and_write_entry_rsp *rsp);
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+				     struct npc_get_kex_cfg_rsp *rsp);
 #endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 188185c..7d7133c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,14 +20,14 @@ struct cgx_evq_entry {
 	struct cgx_link_event link_event;
 };
 
-#define M(_name, _id, _req_type, _rsp_type)				\
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
 static struct _req_type __maybe_unused					\
-*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid)		\
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
 {									\
 	struct _req_type *req;						\
 									\
 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
-		&rvu->mbox_up, devid, sizeof(struct _req_type),		\
+		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
 		sizeof(struct _rsp_type));				\
 	if (!req)							\
 		return NULL;						\
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 {
-	if (cgx_id >= rvu->cgx_cnt)
+	if (cgx_id >= rvu->cgx_cnt_max)
 		return NULL;
 
 	return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt = rvu->cgx_cnt;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int cgx, lmac_cnt, lmac;
 	int pf = PF_CGXMAP_BASE;
 	int size, free_pkind;
 
-	if (!cgx_cnt)
+	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
 	 * An additional entry is required since PF id starts from 1 and
 	 * hence entry at offset 0 is invalid.
 	 */
-	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
-	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
 
-	/* Initialize offset 0 with an invalid cgx and lmac id */
-	rvu->pf2cgxlmac_map[0] = 0xFF;
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
 				  GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt; cgx++) {
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
 		}
 
 		/* Send mbox message to PF */
-		msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
 		if (!msg)
 			continue;
 		msg->link_info = *linfo;
-		otx2_mbox_msg_send(&rvu->mbox_up, pfid);
-		err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
 		if (err)
 			dev_warn(rvu->dev, "notification to pf %d failed\n",
 				 pfid);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
 	} while (1);
 }
 
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
 {
 	struct cgx_event_cb cb;
 	int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
 	if (!rvu->cgx_evh_wq) {
 		dev_err(rvu->dev, "alloc workqueue failed");
-		return;
+		return -ENOMEM;
 	}
 
 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
 	cb.data = rvu;
 
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
 		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
 			if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 					cgx, lmac);
 		}
 	}
+
+	return 0;
 }
 
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
 {
 	if (rvu->cgx_evh_wq) {
 		flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
 	}
 }
 
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
 {
-	int i, err;
+	int cgx, err;
+	void *cgxd;
 
-	/* find available cgx ports */
-	rvu->cgx_cnt = cgx_get_cgx_cnt();
-	if (!rvu->cgx_cnt) {
+	/* CGX port IDs start from 0 and are not necessarily contiguous.
+	 * Hence we allocate resources based on the maximum port ID value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
 		return -ENODEV;
 	}
 
-	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
-				      GFP_KERNEL);
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
 	if (!rvu->cgx_idmap)
 		return -ENOMEM;
 
 	/* Initialize the cgxdata table */
-	for (i = 0; i < rvu->cgx_cnt; i++)
-		rvu->cgx_idmap[i] = cgx_get_pdata(i);
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
 	err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
 		return err;
 
 	/* Register for CGX events */
-	cgx_lmac_event_handler_init(rvu);
+	err = cgx_lmac_event_handler_init(rvu);
+	if (err)
+		return err;
+
+	/* Ensure event handler registration is completed before
+	 * we turn on the links.
+	 */
+	mb();
+
+	/* Do link up for all CGX ports */
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		err = cgx_lmac_linkup_start(cgxd);
+		if (err)
+			dev_err(rvu->dev,
+				"Link up process failed to start on cgx %d\n",
+				cgx);
+	}
+
+	return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+	int cgx, lmac;
+	void *cgxd;
+
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+			cgx_lmac_evh_unregister(cgxd, lmac);
+	}
+
+	/* Ensure event handler unregister is completed */
+	mb();
+
+	rvu_cgx_wq_destroy(rvu);
 	return 0;
 }
 
@@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
 				    struct msg_rsp *rsp)
 {
 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
 				   struct msg_rsp *rsp)
 {
 	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
 			       struct cgx_stats_rsp *rsp)
 {
 	int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp)
 {
@@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp)
 {
@@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp)
 {
 	u16 pcifunc = req->hdr.pcifunc;
@@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp)
 {
 	u16 pcifunc = req->hdr.pcifunc;
@@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
 					  struct msg_rsp *rsp)
 {
 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp)
 {
 	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
 				      struct cgx_link_info_msg *rsp)
 {
 	u8 cgx_id, lmac_id;
@@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
 					  lmac_id, en);
 }
 
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
 				       struct msg_rsp *rsp)
 {
 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
 	return 0;
 }
 
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp)
 {
 	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5ab7ef..962a82f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -55,6 +55,17 @@ struct mce {
 	u16			pcifunc;
 };
 
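+/* Returns true if a NIX LF has been provisioned for this PF/VF */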
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return false;
+	return true;
+}
+
 int rvu_get_nixlf_count(struct rvu *rvu)
 {
 	struct rvu_block *block;
@@ -94,6 +105,23 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
 	return NULL;
 }
 
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+	int err;
+
+	/* Sync all in-flight RX packets to LLC/DRAM */
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+	if (err)
+		dev_err(rvu->dev, "NIX RX software sync failed\n");
+
+	/* As per a HW erratum in 9xxx A0 silicon, HW may clear the
+	 * SW_SYNC[ENA] bit too early. Hence wait for 50us more.
+	 */
+	if (is_rvu_9xxx_A0(rvu))
+		usleep_range(50, 60);
+}
+
 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 			    int lvl, u16 pcifunc, u16 schq)
 {
@@ -109,12 +137,12 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 	if (schq >= txsch->schq.max)
 		return false;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	if (txsch->pfvf_map[schq] != pcifunc) {
-		spin_unlock(&rvu->rsrc_lock);
+		mutex_unlock(&rvu->rsrc_lock);
 		return false;
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return true;
 }
 
@@ -122,7 +150,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	u8 cgx_id, lmac_id;
-	int pkind, pf;
+	int pkind, pf, vf;
 	int err;
 
 	pf = rvu_get_pf(pcifunc);
@@ -148,6 +176,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 		rvu_npc_set_pkind(rvu, pkind, pfvf);
 		break;
 	case NIX_INTF_TYPE_LBK:
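+		/* AF's VFs are paired over loopback: an even VF transmits on
+		 * the RX channel of the next VF and vice versa.
+		 */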
+		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
+		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+						NIX_CHAN_LBK_CHX(0, vf + 1);
+		pfvf->rx_chan_cnt = 1;
+		pfvf->tx_chan_cnt = 1;
+		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+					      pfvf->rx_chan_base, false);
 		break;
 	}
 
@@ -168,14 +204,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 
 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
 					  nixlf, pfvf->rx_chan_base);
+	pfvf->maxlen = NIC_HW_MIN_FRS;
+	pfvf->minlen = NIC_HW_MIN_FRS;
 
 	return 0;
 }
 
 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
 {
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	int err;
 
+	pfvf->maxlen = 0;
+	pfvf->minlen = 0;
+	pfvf->rxvlan = false;
+
 	/* Remove this PF_FUNC from bcast pkt replication list */
 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
 	if (err) {
@@ -637,25 +680,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	return err;
 }
 
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
 				struct nix_aq_enq_req *req,
 				struct nix_aq_enq_rsp *rsp)
 {
 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
 }
 
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp)
 {
 	return nix_lf_hwctx_disable(rvu, req);
 }
 
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
 				  struct nix_lf_alloc_req *req,
 				  struct nix_lf_alloc_rsp *rsp)
 {
-	int nixlf, qints, hwctx_size, err, rc = 0;
+	int nixlf, qints, hwctx_size, intf, err, rc = 0;
 	struct rvu_hwinfo *hw = rvu->hw;
 	u16 pcifunc = req->hdr.pcifunc;
 	struct rvu_block *block;
@@ -676,6 +719,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
 	if (nixlf < 0)
 		return NIX_AF_ERR_AF_LF_INVALID;
 
+	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+	if (req->npa_func) {
+		/* If default, use 'this' NIXLF's PFFUNC */
+		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+			req->npa_func = pcifunc;
+		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+			return NIX_AF_INVAL_NPA_PF_FUNC;
+	}
+
+	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+	if (req->sso_func) {
+		/* If default, use 'this' NIXLF's PFFUNC */
+		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+			req->sso_func = pcifunc;
+		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+			return NIX_AF_INVAL_SSO_PF_FUNC;
+	}
+
 	/* If RSS is being enabled, check if requested config is valid.
 	 * RSS table size should be power of two, otherwise
 	 * RSS_GRP::OFFSET + adder might go beyond that group or
@@ -780,18 +841,10 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
 	/* Enable LMTST for this NIX LF */
 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
 
-	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
-	 * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
-	 * PCIFUNC itself.
-	 */
-	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
-		cfg = pcifunc;
-	else
+	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+	if (req->npa_func)
 		cfg = req->npa_func;
-
-	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
-		cfg |= (u64)pcifunc << 16;
-	else
+	if (req->sso_func)
 		cfg |= (u64)req->sso_func << 16;
 
 	cfg |= (u64)req->xqe_sz << 33;
@@ -800,10 +853,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
 	/* Config Rx pkt length, csum checks and apad  enable / disable */
 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
 
-	err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
 	if (err)
 		goto free_mem;
 
+	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
 	goto exit;
 
 free_mem:
@@ -823,10 +880,18 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+	/* Get HW supported stat count */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+	/* Get count of CQ IRQs and error IRQs supported per LF */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+	rsp->qints = ((cfg >> 12) & 0xFFF);
+	rsp->cints = ((cfg >> 24) & 0xFFF);
 	return rc;
 }
 
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -918,7 +983,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
 }
 
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 				     struct nix_txsch_alloc_req *req,
 				     struct nix_txsch_alloc_rsp *rsp)
 {
@@ -939,7 +1004,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
 	if (!nix_hw)
 		return -EINVAL;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
@@ -995,7 +1060,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
 err:
 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
 exit:
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return rc;
 }
 
@@ -1020,7 +1085,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 		return NIX_AF_ERR_AF_LF_INVALID;
 
 	/* Disable TL2/3 queue links before SMQ flush*/
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
 			continue;
@@ -1062,7 +1127,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 			txsch->pfvf_map[schq] = 0;
 		}
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 
 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1073,7 +1138,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 	return 0;
 }
 
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
 				    struct nix_txsch_free_req *req,
 				    struct msg_rsp *rsp)
 {
@@ -1118,7 +1183,7 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
 	return true;
 }
 
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 				    struct nix_txschq_config *req,
 				    struct msg_rsp *rsp)
 {
@@ -1181,35 +1246,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
 			   struct nix_vtag_config *req)
 {
-	u64 regval = 0;
+	u64 regval = req->vtag_size;
 
-#define NIX_VTAGTYPE_MAX 0x8ull
-#define NIX_VTAGSIZE_MASK 0x7ull
-#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
-
-	if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
-	    req->vtag_size > VTAGSIZE_T8)
+	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
 		return -EINVAL;
 
-	regval = rvu_read64(rvu, blkaddr,
-			    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
-
-	if (req->rx.strip_vtag && req->rx.capture_vtag)
-		regval |= BIT_ULL(4) | BIT_ULL(5);
-	else if (req->rx.strip_vtag)
+	if (req->rx.capture_vtag)
+		regval |= BIT_ULL(5);
+	if (req->rx.strip_vtag)
 		regval |= BIT_ULL(4);
-	else
-		regval &= ~(BIT_ULL(4) | BIT_ULL(5));
-
-	regval &= ~NIX_VTAGSIZE_MASK;
-	regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
 
 	rvu_write64(rvu, blkaddr,
 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
 	return 0;
 }
 
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
 				  struct nix_vtag_config *req,
 				  struct msg_rsp *rsp)
 {
@@ -1294,7 +1346,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
 		return 0;
 
 	/* Add a new one to the list, at the tail */
-	mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
 	if (!mce)
 		return -ENOMEM;
 	mce->idx = idx;
@@ -1317,6 +1369,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 	struct rvu_pfvf *pfvf;
 	int blkaddr;
 
+	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
+	if (is_afvf(pcifunc))
+		return 0;
+
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 	if (blkaddr < 0)
 		return 0;
@@ -1340,7 +1396,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 		return -EINVAL;
 	}
 
-	spin_lock(&mcast->mce_lock);
+	mutex_lock(&mcast->mce_lock);
 
 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
 	if (err)
@@ -1370,7 +1426,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 	}
 
 end:
-	spin_unlock(&mcast->mce_lock);
+	mutex_unlock(&mcast->mce_lock);
 	return err;
 }
 
@@ -1455,7 +1511,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
 		    BIT_ULL(20) | MC_BUF_CNT);
 
-	spin_lock_init(&mcast->mce_lock);
+	mutex_init(&mcast->mce_lock);
 
 	return nix_setup_bcast_tables(rvu, nix_hw);
 }
@@ -1506,7 +1562,7 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 	return 0;
 }
 
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
 				   struct msg_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -1564,7 +1620,7 @@ static int get_flowkey_alg_idx(u32 flow_cfg)
 	return FLOW_KEY_ALG_PORT;
 }
 
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
 					 struct nix_rss_flowkey_cfg *req,
 					 struct msg_rsp *rsp)
 {
@@ -1720,7 +1776,7 @@ static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
 	}
 }
 
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
 				      struct nix_set_mac_addr *req,
 				      struct msg_rsp *rsp)
 {
@@ -1742,10 +1798,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
 
 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
 				    pfvf->rx_chan_base, req->mac_addr);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
 	return 0;
 }
 
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
 				     struct msg_rsp *rsp)
 {
 	bool allmulti = false, disable_promisc = false;
@@ -1775,9 +1834,261 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
 	else
 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
 					      pfvf->rx_chan_base, allmulti);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
 	return 0;
 }
 
+static void nix_find_link_frs(struct rvu *rvu,
+			      struct nix_frs_cfg *req, u16 pcifunc)
+{
+	int pf = rvu_get_pf(pcifunc);
+	struct rvu_pfvf *pfvf;
+	int maxlen, minlen;
+	int numvfs, hwvf;
+	int vf;
+
+	/* Update with requester's min/max lengths */
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	pfvf->maxlen = req->maxlen;
+	if (req->update_minlen)
+		pfvf->minlen = req->minlen;
+
+	maxlen = req->maxlen;
+	minlen = req->update_minlen ? req->minlen : 0;
+
+	/* Get this PF's numVFs and starting hwvf */
+	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+	/* For each VF, compare requested max/minlen */
+	for (vf = 0; vf < numvfs; vf++) {
+		pfvf =  &rvu->hwvf[hwvf + vf];
+		if (pfvf->maxlen > maxlen)
+			maxlen = pfvf->maxlen;
+		if (req->update_minlen &&
+		    pfvf->minlen && pfvf->minlen < minlen)
+			minlen = pfvf->minlen;
+	}
+
+	/* Compare requested max/minlen with PF's max/minlen */
+	pfvf = &rvu->pf[pf];
+	if (pfvf->maxlen > maxlen)
+		maxlen = pfvf->maxlen;
+	if (req->update_minlen &&
+	    pfvf->minlen && pfvf->minlen < minlen)
+		minlen = pfvf->minlen;
+
+	/* Update the request with the max/min of the PF and its VFs */
+	req->maxlen = maxlen;
+	if (req->update_minlen)
+		req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+				    struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int pf = rvu_get_pf(pcifunc);
+	int blkaddr, schq, link = -1;
+	struct nix_txsch *txsch;
+	u64 cfg, lmac_fifo_len;
+	struct nix_hw *nix_hw;
+	u8 cgx = 0, lmac = 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+		return NIX_AF_ERR_FRS_INVALID;
+
+	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+		return NIX_AF_ERR_FRS_INVALID;
+
+	/* Check if requester wants to update SMQs */
+	if (!req->update_smq)
+		goto rx_frscfg;
+
+	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+	mutex_lock(&rvu->rsrc_lock);
+	for (schq = 0; schq < txsch->schq.max; schq++) {
+		if (txsch->pfvf_map[schq] != pcifunc)
+			continue;
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
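+		/* maxlen sits in bits [23:8] and minlen in bits [6:0] of
+		 * NIX_AF_SMQX_CFG, per the masks applied below.
+		 */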
+		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+		if (req->update_minlen)
+			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+
+rx_frscfg:
+	/* Check if config is for SDP link */
+	if (req->sdp_link) {
+		if (!hw->sdp_links)
+			return NIX_AF_ERR_RX_LINK_INVALID;
+		link = hw->cgx_links + hw->lbk_links;
+		goto linkcfg;
+	}
+
+	/* Check if the request is from CGX mapped RVU PF */
+	if (is_pf_cgxmapped(rvu, pf)) {
+		/* Get CGX and LMAC to which this PF is mapped and find link */
+		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+		link = (cgx * hw->lmac_per_cgx) + lmac;
+	} else if (pf == 0) {
+		/* For VFs of PF0 ingress is LBK port, so config LBK link */
+		link = hw->cgx_links;
+	}
+
+	if (link < 0)
+		return NIX_AF_ERR_RX_LINK_INVALID;
+
+	nix_find_link_frs(rvu, req, pcifunc);
+
+linkcfg:
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+	if (req->update_minlen)
+		cfg = (cfg & ~0xFFFFULL) | req->minlen;
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+	if (req->sdp_link || pf == 0)
+		return 0;
+
+	/* Update transmit credits for CGX links */
+	lmac_fifo_len =
+		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+	cfg &= ~(0xFFFFFULL << 12);
+	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
+	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+				      struct msg_rsp *rsp)
+{
+	struct npc_mcam_alloc_entry_req alloc_req = { };
+	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
+	struct npc_mcam_free_entry_req free_req = { };
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, nixlf, err;
+	struct rvu_pfvf *pfvf;
+
+	/* LBK VFs do not have a separate MCAM UCAST entry, hence
+	 * skip allocating rxvlan for them
+	 */
+	if (is_afvf(pcifunc))
+		return 0;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	if (pfvf->rxvlan)
+		return 0;
+
+	/* alloc new mcam entry */
+	alloc_req.hdr.pcifunc = pcifunc;
+	alloc_req.count = 1;
+
+	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+						    &alloc_rsp);
+	if (err)
+		return err;
+
+	/* update entry to enable rxvlan offload */
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0) {
+		err = NIX_AF_ERR_AF_LF_INVALID;
+		goto free_entry;
+	}
+
+	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0) {
+		err = NIX_AF_ERR_AF_LF_INVALID;
+		goto free_entry;
+	}
+
+	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
+	/* all it means is that rxvlan_index is valid */
+	pfvf->rxvlan = true;
+
+	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+	if (err)
+		goto free_entry;
+
+	return 0;
+free_entry:
+	free_req.hdr.pcifunc = pcifunc;
+	free_req.entry = alloc_rsp.entry_list[0];
+	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
+	pfvf->rxvlan = false;
+	return err;
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int cgx, lmac_cnt, slink, link;
+	u64 tx_credits;
+
+	/* Set default min/max packet lengths allowed on NIX Rx links.
+	 *
+	 * With the HW reset minlen value of 60 bytes, HW will treat ARP
+	 * pkts as undersized and report them to SW as error pkts, hence
+	 * set it to 40 bytes.
+	 */
+	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
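+		/* maxlen in bits [31:16], minlen in bits [15:0] */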
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+	}
+
+	if (hw->sdp_links) {
+		link = hw->cgx_links + hw->lbk_links;
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+	}
+
+	/* Set credits for Tx links assuming max packet length allowed.
+	 * This will be reconfigured based on MTU set for PF/VF.
+	 */
+	for (cgx = 0; cgx < hw->cgx; cgx++) {
+		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+		/* Enable credits and set credit pkt count to max allowed */
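+		/* BIT(1) is the enable, bits [10:2] carry the max pkt count
+		 * (0x1FF) and bits 12 and above the credit value computed
+		 * above.
+		 */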
+		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+		slink = cgx * hw->lmac_per_cgx;
+		for (link = slink; link < (slink + lmac_cnt); link++) {
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
+				    tx_credits);
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),
+				    tx_credits);
+		}
+	}
+
+	/* Set Tx credits for LBK link */
+	slink = hw->cgx_links;
+	for (link = slink; link < (slink + hw->lbk_links); link++) {
+		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+		/* Enable credits and set credit pkt count to max allowed */
+		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
+	}
+}
+
 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
 {
 	int idx, err;
@@ -1796,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
 
 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
 	/* Check if CGX devices are ready */
-	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
-		if (status & (BIT_ULL(16 + idx)))
+	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+		/* Skip when cgx port is not available */
+		if (!rvu_cgx_pdata(idx, rvu) ||
+		    (status & (BIT_ULL(16 + idx))))
 			continue;
 		dev_err(rvu->dev,
 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
@@ -1830,10 +2143,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Set admin queue endianness */
 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
 #ifdef __BIG_ENDIAN
-	cfg |= BIT_ULL(1);
+	cfg |= BIT_ULL(8);
 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
 #else
-	cfg &= ~BIT_ULL(1);
+	cfg &= ~BIT_ULL(8);
 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
 #endif
 
@@ -1870,6 +2183,14 @@ int rvu_nix_init(struct rvu *rvu)
 		return 0;
 	block = &hw->block[blkaddr];
 
+	/* As per a HW erratum in 9xxx A0 silicon, NIX may corrupt
+	 * internal state when conditional clocks are turned off.
+	 * Hence enable them.
+	 */
+	if (is_rvu_9xxx_A0(rvu))
+		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+
 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
 	err = nix_calibrate_x2p(rvu, blkaddr);
 	if (err)
@@ -1922,6 +2243,9 @@ int rvu_nix_init(struct rvu *rvu)
 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
 
 		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+
+		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+		nix_link_config(rvu, blkaddr);
 	}
 	return 0;
 }
@@ -1955,5 +2279,88 @@ void rvu_nix_freemem(struct rvu *rvu)
 		mcast = &nix_hw->mcast;
 		qmem_free(rvu->dev, mcast->mce_ctx);
 		qmem_free(rvu->dev, mcast->mcast_buf);
+		mutex_destroy(&mcast->mce_lock);
 	}
 }
+
+static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct rvu_hwinfo *hw = rvu->hw;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (*nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+				     struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, err;
+
+	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+	if (err)
+		return err;
+
+	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+	return 0;
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, err;
+
+	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+	if (err)
+		return err;
+
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+	return 0;
+}
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct hwctx_disable_req ctx_req;
+	int err;
+
+	ctx_req.hdr.pcifunc = pcifunc;
+
+	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+	nix_interface_deinit(rvu, pcifunc, nixlf);
+	nix_rx_sync(rvu, blkaddr);
+	nix_txschq_free(rvu, pcifunc);
+
+	if (pfvf->sq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "SQ ctx disable failed\n");
+	}
+
+	if (pfvf->rq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "RQ ctx disable failed\n");
+	}
+
+	if (pfvf->cq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "CQ ctx disable failed\n");
+	}
+
+	nix_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 7531fdc..c0e165d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	return err;
 }
 
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
 {
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
 
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp)
 {
@@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
 	pfvf->npa_qints_ctx = NULL;
 }
 
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 				  struct npa_lf_alloc_req *req,
 				  struct npa_lf_alloc_rsp *rsp)
 {
@@ -372,7 +372,7 @@ int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
 	return rc;
 }
 
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu)
 	block = &hw->block[blkaddr];
 	rvu_aq_free(rvu, block->aq);
 }
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct hwctx_disable_req ctx_req;
+
+	/* Disable all pools */
+	ctx_req.hdr.pcifunc = pcifunc;
+	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+	npa_lf_hwctx_disable(rvu, &ctx_req);
+
+	/* Disable all auras */
+	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+	npa_lf_hwctx_disable(rvu, &ctx_req);
+
+	npa_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 23ff47f..6ea2b0e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 
@@ -26,13 +27,10 @@
 
 #define NPC_PARSE_RESULT_DMAC_OFFSET	8
 
-struct mcam_entry {
-#define NPC_MAX_KWS_IN_KEY	7 /* Number of keywords in max keywidth */
-	u64	kw[NPC_MAX_KWS_IN_KEY];
-	u64	kw_mask[NPC_MAX_KWS_IN_KEY];
-	u64	action;
-	u64	vtag_action;
-};
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+				      int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+				       u16 pcifunc);
 
 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
 {
@@ -256,6 +254,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
 		npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
 }
 
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+				int blkaddr, u16 src, u16 dest)
+{
+	int dbank = npc_get_bank(mcam, dest);
+	int sbank = npc_get_bank(mcam, src);
+	u64 cfg, sreg, dreg;
+	int bank, i;
+
+	src &= (mcam->banksize - 1);
+	dest &= (mcam->banksize - 1);
+
+	/* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+		sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+		dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+		for (i = 0; i < 6; i++) {
+			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+		}
+	}
+
+	/* Copy action */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+	/* Copy TAG action */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+	/* Enable or disable */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
 static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
 			       int blkaddr, int index)
 {
@@ -269,12 +307,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
 void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 				 int nixlf, u64 chan, u8 *mac_addr)
 {
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct mcam_entry entry = { {0} };
 	struct nix_rx_action action;
 	int blkaddr, index, kwi;
 	u64 mac = 0;
 
+	/* AF's VFs work in promiscuous mode */
+	if (is_afvf(pcifunc))
+		return;
+
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0)
 		return;
@@ -308,6 +351,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 	entry.action = *(u64 *)&action;
 	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
 			      NIX_INTF_RX, &entry, true);
+
+	/* add VLAN matching, set up the action and save entry back for later */
+	entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+	entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+	entry.vtag_action = VTAG0_VALID_BIT |
+			    FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+			    FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+			    FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+	memcpy(&pfvf->entry, &entry, sizeof(entry));
 }
 
 void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -315,17 +369,17 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct mcam_entry entry = { {0} };
-	struct nix_rx_action action;
+	struct nix_rx_action action = { };
 	int blkaddr, index, kwi;
 
+	/* Only PF or AF VF can add a promiscuous entry */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+		return;
+
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0)
 		return;
 
-	/* Only PF or AF VF can add a promiscuous entry */
-	if (pcifunc & RVU_PFVF_FUNC_MASK)
-		return;
-
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_PROMISC_ENTRY);
 
@@ -347,7 +401,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 			      NIX_INTF_RX, &entry, true);
 }
 
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+				     int nixlf, bool enable)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	int blkaddr, index;
@@ -362,7 +417,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
 
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_PROMISC_ENTRY);
-	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
 }
 
 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -390,9 +455,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_BCAST_ENTRY);
 
-	/* Check for L2B bit and LMAC channel */
-	entry.kw[0] = BIT_ULL(25) | chan;
-	entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+	/* Check for L2B bit and LMAC channel
+	 * NOTE: The MKEX default profile is a reduced version (a stop-gap
+	 * approach intended to accommodate more capabilities while ignoring
+	 * a few bits).
+	 * We care for L2B, which per the HRM's NPC_PARSE_KEX_S sits at
+	 * BIT_POS[25]; it is moved to BIT_POS[13], and ERRCODE/ERRLEV are
+	 * ignored, so that we don't lose out on capability features needed
+	 * for CoS (from an ODP PoV), e.g. VLAN, DSCP.
+	 *
+	 * Reduced layout of the MKEX default profile -
+	 * it includes the following (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
+	 *
+	 * BIT_POS[31:28] : LD
+	 * BIT_POS[27:24] : LC
+	 * BIT_POS[23:20] : LB
+	 * BIT_POS[19:16] : LA
+	 * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+	 * BIT_POS[11:00] : CHAN
+	 *
+	 */
+	entry.kw[0] = BIT_ULL(13) | chan;
+	entry.kw_mask[0] = ~entry.kw[0] & (BIT_ULL(13) | 0xFFFULL);
 
 	*(u64 *)&action = 0x00;
 #ifdef MCAST_MCE
@@ -454,51 +538,95 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
 
 	rvu_write64(rvu, blkaddr,
 		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
 }
 
-void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+				       int nixlf, bool enable)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct nix_rx_action action;
-	int blkaddr, index, bank;
+	int index, bank, blkaddr;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0)
 		return;
 
-	/* Disable ucast MCAM match entry of this PF/VF */
+	/* Ucast MCAM match entry of this PF/VF */
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_UCAST_ENTRY);
-	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
 
-	/* For PF, disable promisc and bcast MCAM match entries */
-	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
-		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
-						 nixlf, NIXLF_BCAST_ENTRY);
-		/* For bcast, disable only if it's action is not
-		 * packet replication, incase if action is replication
-		 * then this PF's nixlf is removed from bcast replication
-		 * list.
-		 */
-		bank = npc_get_bank(mcam, index);
-		index &= (mcam->banksize - 1);
-		*(u64 *)&action = rvu_read64(rvu, blkaddr,
-				     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
-		if (action.op != NIX_RX_ACTIONOP_MCAST)
-			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	/* For PF, ena/dis promisc and bcast MCAM match entries */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return;
 
+	/* For bcast, enable/disable only if its action is not
+	 * packet replication; in case the action is replication
+	 * then this PF's nixlf is removed from the bcast replication
+	 * list.
+	 */
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_BCAST_ENTRY);
+	bank = npc_get_bank(mcam, index);
+	*(u64 *)&action = rvu_read64(rvu, blkaddr,
+	     NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
+	if (action.op != NIX_RX_ACTIONOP_MCAST)
+		npc_enable_mcam_entry(rvu, mcam,
+				      blkaddr, index, enable);
+	if (enable)
+		rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
+	else
 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-	}
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
 }
 
-#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	mutex_lock(&mcam->lock);
+
+	/* Disable and free all MCAM entries mapped to this 'pcifunc' */
+	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+	/* Free all MCAM counters mapped to this 'pcifunc' */
+	npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+	mutex_unlock(&mcam->lock);
+
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg)	\
 	rvu_write64(rvu, blkaddr,			\
 		NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
 
-#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg)	\
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg)	\
 	rvu_write64(rvu, blkaddr,			\
 		NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
 
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)		\
+			(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+			 ((flags_ena) << 6) | ((key_ofs) & 0x3F))
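+
+/* The value built by KEX_LD_CFG() packs bytesm1 (bytes to extract minus
+ * one) into bits [..:16], the header offset into bits [15:8], the extract
+ * enable into bit 7, the flags enable into bit 6 and the byte offset of
+ * the extracted data within the MCAM search key into bits [5:0].
+ */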
+
 static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -514,28 +642,66 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
 	 */
 	for (lid = 0; lid < lid_count; lid++) {
 		for (ltype = 0; ltype < 16; ltype++) {
-			LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
-			LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
-			LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
-			LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
 
-			LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
-			LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
-			LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
-			LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
 		}
 	}
 
-	/* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts
-	 * then 112bit key is not sufficient
-	 */
 	if (mcam->keysize != NPC_MCAM_KEY_X2)
 		return;
 
-	/* Start placing extracted data/flags from 64bit onwards, for now */
-	/* Extract DMAC from the packet */
-	cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
-	LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+	/* Default MCAM KEX profile */
+	/* Layer A: Ethernet: */
+
+	/* DMAC: 6 bytes, KW1[47:0] */
+	cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+
+	/* Ethertype: 2 bytes, KW0[47:32] */
+	cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+
+	/* Layer B: Single VLAN (CTAG) */
+	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+	cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
+
+	/* Layer B: Stacked VLAN (STAG|QinQ) */
+	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+	cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+
+	/* Layer C: IPv4 */
+	/* SIP+DIP: 8 bytes, KW2[63:0] */
+	cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
+	/* TOS: 1 byte, KW1[63:56] */
+	cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
+
+	/* Layer D:UDP */
+	/* SPORT: 2 bytes, KW3[15:0] */
+	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
+	/* DPORT: 2 bytes, KW3[31:16] */
+	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
+
+	/* Layer D:TCP */
+	/* SPORT: 2 bytes, KW3[15:0] */
+	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
+	/* DPORT: 2 bytes, KW3[31:16] */
+	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
 }
 
 static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -690,13 +856,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
 {
 	int nixlf_count = rvu_get_nixlf_count(rvu);
 	struct npc_mcam *mcam = &rvu->hw->mcam;
-	int rsvd;
+	int rsvd, err;
 	u64 cfg;
 
 	/* Get HW limits */
 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
 	mcam->banks = (cfg >> 44) & 0xF;
 	mcam->banksize = (cfg >> 28) & 0xFFFF;
+	mcam->counters.max = (cfg >> 48) & 0xFFFF;
 
 	/* Actual number of MCAM entries vary by entry size */
 	cfg = (rvu_read64(rvu, blkaddr,
@@ -728,20 +895,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
 		return -ENOMEM;
 	}
 
-	mcam->entries = mcam->total_entries - rsvd;
-	mcam->nixlf_offset = mcam->entries;
+	mcam->bmap_entries = mcam->total_entries - rsvd;
+	mcam->nixlf_offset = mcam->bmap_entries;
 	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
 
-	spin_lock_init(&mcam->lock);
+	/* Allocate bitmaps for managing MCAM entries */
+	mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+				  sizeof(long), GFP_KERNEL);
+	if (!mcam->bmap)
+		return -ENOMEM;
+
+	mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+					  BITS_TO_LONGS(mcam->bmap_entries),
+					  sizeof(long), GFP_KERNEL);
+	if (!mcam->bmap_reverse)
+		return -ENOMEM;
+
+	mcam->bmap_fcnt = mcam->bmap_entries;
+
+	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+	mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+					    sizeof(u16), GFP_KERNEL);
+	if (!mcam->entry2pfvf_map)
+		return -ENOMEM;
+
+	/* Reserve 1/8th of MCAM entries at the bottom for low priority
+	 * allocations and another 1/8th at the top for high priority
+	 * allocations.
+	 */
+	mcam->lprio_count = mcam->bmap_entries / 8;
+	if (mcam->lprio_count > BITS_PER_LONG)
+		mcam->lprio_count = round_down(mcam->lprio_count,
+					       BITS_PER_LONG);
+	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+	mcam->hprio_count = mcam->lprio_count;
+	mcam->hprio_end = mcam->hprio_count;
+
+	/* Allocate bitmap for managing MCAM counters and memory
+	 * for saving counter to RVU PFFUNC allocation mapping.
+	 */
+	err = rvu_alloc_bitmap(&mcam->counters);
+	if (err)
+		return err;
+
+	mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+					   sizeof(u16), GFP_KERNEL);
+	if (!mcam->cntr2pfvf_map)
+		goto free_mem;
+
+	/* Alloc memory for MCAM entry to counter mapping and for tracking
+	 * counter's reference count.
+	 */
+	mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+					    sizeof(u16), GFP_KERNEL);
+	if (!mcam->entry2cntr_map)
+		goto free_mem;
+
+	mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+					 sizeof(u16), GFP_KERNEL);
+	if (!mcam->cntr_refcnt)
+		goto free_mem;
+
+	mutex_init(&mcam->lock);
 
 	return 0;
+
+free_mem:
+	kfree(mcam->counters.bmap);
+	return -ENOMEM;
 }
 
 int rvu_npc_init(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
 	u64 keyz = NPC_MCAM_KEY_X2;
-	int blkaddr, err;
+	int blkaddr, entry, bank, err;
+	u64 cfg, nibble_ena;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0) {
@@ -749,6 +978,14 @@ int rvu_npc_init(struct rvu *rvu)
 		return -ENODEV;
 	}
 
+	/* First disable all MCAM entries, to stop traffic towards NIXLFs */
+	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+	for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
+		for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+			rvu_write64(rvu, blkaddr,
+				    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+	}
+
 	/* Allocate resource bimap for pkind*/
 	pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
 				      NPC_AF_CONST1) >> 12) & 0xFF;
@@ -780,13 +1017,18 @@ int rvu_npc_init(struct rvu *rvu)
 		    BIT_ULL(6) | BIT_ULL(2));
 
 	/* Set RX and TX side MCAM search key size.
-	 * Also enable parse key extract nibbles suchthat except
-	 * layer E to H, rest of the key is included for MCAM search.
+	 * LA..LD (ltype only) + Channel
 	 */
+	nibble_ena = 0x49247;
 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
-		    ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+			((keyz & 0x3) << 32) | nibble_ena);
+	/* Due to an erratum (35786) in A0 pass silicon, parse nibble enable
+	 * configuration has to be identical for both Rx and Tx interfaces.
+	 */
+	if (!is_rvu_9xxx_A0(rvu))
+		nibble_ena = (1ULL << 19) - 1;
 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
-		    ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+			((keyz & 0x3) << 32) | nibble_ena);
 
 	err = npc_mcam_rsrcs_init(rvu, blkaddr);
 	if (err)
@@ -811,6 +1053,1019 @@ int rvu_npc_init(struct rvu *rvu)
 void rvu_npc_freemem(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
 
 	kfree(pkind->rsrc.bmap);
+	kfree(mcam->counters.bmap);
+	mutex_destroy(&mcam->lock);
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+				 u16 pcifunc, int entry)
+{
+	/* Verify if entry is valid and if it is indeed
+	 * allocated to the requesting PFFUNC.
+	 */
+	if (entry >= mcam->bmap_entries)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (pcifunc != mcam->entry2pfvf_map[entry])
+		return NPC_MCAM_PERM_DENIED;
+
+	return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+				   u16 pcifunc, int cntr)
+{
+	/* Verify if counter is valid and if it is indeed
+	 * allocated to the requesting PFFUNC.
+	 */
+	if (cntr >= mcam->counters.max)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (pcifunc != mcam->cntr2pfvf_map[cntr])
+		return NPC_MCAM_PERM_DENIED;
+
+	return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+					int blkaddr, u16 entry, u16 cntr)
+{
+	u16 index = entry & (mcam->banksize - 1);
+	u16 bank = npc_get_bank(mcam, entry);
+
+	/* Set mapping and increment counter's refcnt */
+	mcam->entry2cntr_map[entry] = cntr;
+	mcam->cntr_refcnt[cntr]++;
+	/* Enable stats */
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+		    BIT_ULL(9) | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+					  struct npc_mcam *mcam,
+					  int blkaddr, u16 entry, u16 cntr)
+{
+	u16 index = entry & (mcam->banksize - 1);
+	u16 bank = npc_get_bank(mcam, entry);
+
+	/* Remove mapping and reduce counter's refcnt */
+	mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+	mcam->cntr_refcnt[cntr]--;
+	/* Disable stats */
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+	u16 entry, rentry;
+
+	entry = index;
+	rentry = mcam->bmap_entries - index - 1;
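+	/* Entry 'index' is tracked at bit (bmap_entries - index - 1) in the
+	 * reverse bitmap, so a forward scan of bmap_reverse walks entries
+	 * from the highest index down to the lowest.
+	 */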
+
+	__set_bit(entry, mcam->bmap);
+	__set_bit(rentry, mcam->bmap_reverse);
+	mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+	u16 entry, rentry;
+
+	entry = index;
+	rentry = mcam->bmap_entries - index - 1;
+
+	__clear_bit(entry, mcam->bmap);
+	__clear_bit(rentry, mcam->bmap_reverse);
+	mcam->bmap_fcnt++;
+}
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+				      int blkaddr, u16 pcifunc)
+{
+	u16 index, cntr;
+
+	/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+	for (index = 0; index < mcam->bmap_entries; index++) {
+		if (mcam->entry2pfvf_map[index] == pcifunc) {
+			mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+			/* Free the entry in bitmap */
+			npc_mcam_clear_bit(mcam, index);
+			/* Disable the entry */
+			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+			/* Update entry2counter mapping */
+			cntr = mcam->entry2cntr_map[index];
+			if (cntr != NPC_MCAM_INVALID_MAP)
+				npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+							      blkaddr, index,
+							      cntr);
+		}
+	}
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+				       u16 pcifunc)
+{
+	u16 cntr;
+
+	/* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+			mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+			mcam->cntr_refcnt[cntr] = 0;
+			rvu_free_rsrc(&mcam->counters, cntr);
+			/* This API is expected to be called after freeing
+			 * MCAM entries, which in turn will remove
+			 * 'entry to counter' mapping.
+			 * No need to do it again.
+			 */
+		}
+	}
+}
+
+/* Find area of contiguous free entries of size 'nr'.
+ * If not found return max contiguous free entries available.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+				   u16 nr, u16 *max_area)
+{
+	u16 max_area_start = 0;
+	u16 index, next, end;
+
+	*max_area = 0;
+
+again:
+	index = find_next_zero_bit(map, size, start);
+	if (index >= size)
+		return max_area_start;
+
+	end = ((index + nr) >= size) ? size : index + nr;
+	next = find_next_bit(map, end, index);
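+	/* [index, next) is a zero run of (next - index) free entries within
+	 * the current window; remember the largest such run seen so far.
+	 */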
+	if (*max_area < (next - index)) {
+		*max_area = next - index;
+		max_area_start = index;
+	}
+
+	if (next < end) {
+		start = next + 1;
+		goto again;
+	}
+
+	return max_area_start;
+}
+
+/* Find number of free MCAM entries available
+ * within range, i.e. between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+	u16 index, next;
+	u16 fcnt = 0;
+
+again:
+	if (start >= end)
+		return fcnt;
+
+	index = find_next_zero_bit(map, end, start);
+	if (index >= end)
+		return fcnt;
+
+	next = find_next_bit(map, end, index);
+	if (next <= end) {
+		fcnt += next - index;
+		start = next + 1;
+		goto again;
+	}
+
+	fcnt += end - index;
+	return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+				   struct npc_mcam_alloc_entry_req *req,
+				   u16 *start, u16 *end, bool *reverse)
+{
+	u16 fcnt;
+
+	if (req->priority == NPC_MCAM_HIGHER_PRIO)
+		goto hprio;
+
+	/* For a low priority entry allocation
+	 * - If reference entry is not in hprio zone then
+	 *      search range: ref_entry to end.
+	 * - If reference entry is in hprio zone and if
+	 *   request can be accommodated in non-hprio zone then
+	 *      search range: 'start of middle zone' to 'end'
+	 * - else search in reverse, so that fewer hprio
+	 *   zone entries are allocated.
+	 */
+
+	*reverse = false;
+	*start = req->ref_entry + 1;
+	*end = mcam->bmap_entries;
+
+	if (req->ref_entry >= mcam->hprio_end)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->bmap_entries);
+	if (fcnt > req->count)
+		*start = mcam->hprio_end;
+	else
+		*reverse = true;
+	return;
+
+hprio:
+	/* For a high priority entry allocation, search is always
+	 * in reverse to preserve hprio zone entries.
+	 * - If reference entry is not in lprio zone then
+	 *      search range: 0 to ref_entry.
+	 * - If reference entry is in lprio zone and if
+	 *   request can be accommodated in middle zone then
+	 *      search range: 'hprio_end' to 'lprio_start'
+	 */
+
+	*reverse = true;
+	*start = 0;
+	*end = req->ref_entry;
+
+	if (req->ref_entry <= mcam->lprio_start)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->lprio_start);
+	if (fcnt < req->count)
+		return;
+	*start = mcam->hprio_end;
+	*end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+				  struct npc_mcam_alloc_entry_req *req,
+				  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+	u16 fcnt, hp_fcnt, lp_fcnt;
+	u16 start, end, index;
+	int entry, next_start;
+	bool reverse = false;
+	unsigned long *bmap;
+	u16 max_contig;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if there are any free entries */
+	if (!mcam->bmap_fcnt) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	/* MCAM entries are divided into high priority, middle and
+	 * low priority zones. The idea is to avoid allocating the top-most
+	 * and bottom-most entries as much as possible, to increase the
+	 * probability of honouring priority allocation requests.
+	 *
+	 * Two bitmaps are used for mcam entry management,
+	 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
+	 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
+	 *
+	 * Reverse bitmap is used to allocate entries
+	 * - when a higher priority entry is requested
+	 * - when available free entries are less.
+	 * Out of the available free entries, the lower priority ones are
+	 * always chosen when a 'high vs low' question arises.
+	 */
+
+	/* Get the search range for priority allocation request */
+	if (req->priority) {
+		npc_get_mcam_search_range_priority(mcam, req,
+						   &start, &end, &reverse);
+		goto alloc;
+	}
+
+	/* Find out the search range for non-priority allocation request
+	 *
+	 * Get MCAM free entry count in middle zone.
+	 */
+	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+					  mcam->lprio_start,
+					  mcam->bmap_entries);
+	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+	/* Check if request can be accommodated in the middle zone */
+	if (fcnt > req->count) {
+		start = mcam->hprio_end;
+		end = mcam->lprio_start;
+	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+		/* Expand search zone from half of hprio zone to
+		 * half of lprio zone.
+		 */
+		start = mcam->hprio_end / 2;
+		end = mcam->bmap_entries - (mcam->lprio_count / 2);
+		reverse = true;
+	} else {
+		/* Not enough free entries, search all entries in reverse,
+		 * so that low priority ones will get used up.
+		 */
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+	}
+
+alloc:
+	if (reverse) {
+		bmap = mcam->bmap_reverse;
+		start = mcam->bmap_entries - start;
+		end = mcam->bmap_entries - end;
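+		/* Mirroring reverses the order, so swap start/end to keep
+		 * start < end for the bitmap search below.
+		 */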
+		index = start;
+		start = end;
+		end = index;
+	} else {
+		bmap = mcam->bmap;
+	}
+
+	if (req->contig) {
+		/* Allocate requested number of contiguous entries, if
+		 * unsuccessful find max contiguous entries available.
+		 */
+		index = npc_mcam_find_zero_area(bmap, end, start,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		if (reverse)
+			rsp->entry = mcam->bmap_entries - index - max_contig;
+		else
+			rsp->entry = index;
+	} else {
+		/* Allocate requested number of non-contiguous entries,
+		 * if unsuccessful allocate as many as possible.
+		 */
+		rsp->count = 0;
+		next_start = start;
+		for (entry = 0; entry < req->count; entry++) {
+			index = find_next_zero_bit(bmap, end, next_start);
+			if (index >= end)
+				break;
+
+			next_start = start + (index - start) + 1;
+
+			/* Save the entry's index */
+			if (reverse)
+				index = mcam->bmap_entries - index - 1;
+			entry_list[entry] = index;
+			rsp->count++;
+		}
+	}
+
+	/* If allocating the requested no of entries is unsuccessful,
+	 * expand the search range to full bitmap length and retry.
+	 */
+	if (!req->priority && (rsp->count < req->count) &&
+	    ((end - start) != mcam->bmap_entries)) {
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+		goto alloc;
+	}
+
+	/* For priority entry allocation requests, if allocation is
+	 * failed then expand search to max possible range and retry.
+	 */
+	if (req->priority && rsp->count < req->count) {
+		if (req->priority == NPC_MCAM_LOWER_PRIO &&
+		    (start != (req->ref_entry + 1))) {
+			start = req->ref_entry + 1;
+			end = mcam->bmap_entries;
+			reverse = false;
+			goto alloc;
+		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+			   ((end - start) != req->ref_entry)) {
+			start = 0;
+			end = req->ref_entry;
+			reverse = true;
+			goto alloc;
+		}
+	}
+
+	/* Copy MCAM entry indices into mbox response entry_list.
+	 * Requester always expects indices in ascending order, so
+	 * reverse the list if the reverse bitmap was used for allocation.
+	 */
+	if (!req->contig && rsp->count) {
+		index = 0;
+		for (entry = rsp->count - 1; entry >= 0; entry--) {
+			if (reverse)
+				rsp->entry_list[index++] = entry_list[entry];
+			else
+				rsp->entry_list[entry] = entry_list[entry];
+		}
+	}
+
+	/* Mark the allocated entries as used and map them to the PFFUNC */
+	for (entry = 0; entry < rsp->count; entry++) {
+		index = req->contig ?
+			(rsp->entry + entry) : rsp->entry_list[entry];
+		npc_mcam_set_bit(mcam, index);
+		mcam->entry2pfvf_map[index] = pcifunc;
+		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+	}
+
+	/* Update available free count in mbox response */
+	rsp->free_count = mcam->bmap_fcnt;
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+					  struct npc_mcam_alloc_entry_req *req,
+					  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	rsp->entry = NPC_MCAM_ENTRY_INVALID;
+	rsp->free_count = 0;
+
+	/* Check if ref_entry is within range */
+	if (req->priority && req->ref_entry >= mcam->bmap_entries)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* ref_entry can't be '0' if requested priority is high.
+	 * Can't be last entry if requested priority is low.
+	 */
+	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+	     req->priority == NPC_MCAM_LOWER_PRIO))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since list of allocated indices needs to be sent to requester,
+	 * max number of non-contiguous entries per mbox msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Alloc request from PFFUNC with no NIXLF attached should be denied */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_ALLOC_DENIED;
+
+	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+					 struct npc_mcam_free_entry_req *req,
+					 struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc = 0;
+	u16 cntr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Free request from PFFUNC with no NIXLF attached, ignore */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	if (req->all)
+		goto free_all;
+
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	mcam->entry2pfvf_map[req->entry] = 0;
+	npc_mcam_clear_bit(mcam, req->entry);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	/* Update entry2counter mapping */
+	cntr = mcam->entry2cntr_map[req->entry];
+	if (cntr != NPC_MCAM_INVALID_MAP)
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, cntr);
+
+	goto exit;
+
+free_all:
+	/* Free up all entries allocated to requesting PFFUNC */
+	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+					  struct npc_mcam_write_entry_req *req,
+					  struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	if (req->set_cntr &&
+	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
+	if (req->set_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					    req->entry, req->cntr);
+
+	rc = 0;
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+					  struct npc_mcam_shift_entry_req *req,
+					  struct npc_mcam_shift_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 old_entry, new_entry;
+	u16 index, cntr;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	for (index = 0; index < req->shift_count; index++) {
+		old_entry = req->curr_entry[index];
+		new_entry = req->new_entry[index];
+
+		/* Check if both old and new entries are valid and
+		 * belong to this PFFUNC or not.
+		 */
+		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+		if (rc)
+			break;
+
+		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+		if (rc)
+			break;
+
+		/* new_entry should not have a counter mapped */
+		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+			rc = NPC_MCAM_PERM_DENIED;
+			break;
+		}
+
+		/* Disable the new_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+		/* Copy rule from old entry to new entry */
+		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+		/* Copy counter mapping, if any */
+		cntr = mcam->entry2cntr_map[old_entry];
+		if (cntr != NPC_MCAM_INVALID_MAP) {
+			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						      old_entry, cntr);
+			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						    new_entry, cntr);
+		}
+
+		/* Enable new_entry and disable old_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+	}
+
+	/* If shift has failed then report the failed index */
+	if (index != req->shift_count) {
+		rc = NPC_MCAM_PERM_DENIED;
+		rsp->failed_entry_idx = index;
+	}
+
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+			struct npc_mcam_alloc_counter_req *req,
+			struct npc_mcam_alloc_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 max_contig, cntr;
+	int blkaddr, index;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since list of allocated counter IDs needs to be sent to requester,
+	 * max number of non-contiguous counters per mbox msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if unused counters are available or not */
+	if (!rvu_rsrc_free_count(&mcam->counters)) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	rsp->count = 0;
+
+	if (req->contig) {
+		/* Allocate requested number of contiguous counters, if
+		 * unsuccessful find max contiguous entries available.
+		 */
+		index = npc_mcam_find_zero_area(mcam->counters.bmap,
+						mcam->counters.max, 0,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		rsp->cntr = index;
+		for (cntr = index; cntr < (index + max_contig); cntr++) {
+			__set_bit(cntr, mcam->counters.bmap);
+			mcam->cntr2pfvf_map[cntr] = pcifunc;
+		}
+	} else {
+		/* Allocate requested number of non-contiguous counters,
+		 * if unsuccessful allocate as many as possible.
+		 */
+		for (cntr = 0; cntr < req->count; cntr++) {
+			index = rvu_alloc_rsrc(&mcam->counters);
+			if (index < 0)
+				break;
+			rsp->cntr_list[cntr] = index;
+			rsp->count++;
+			mcam->cntr2pfvf_map[index] = pcifunc;
+		}
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (err) {
+		mutex_unlock(&mcam->lock);
+		return err;
+	}
+
+	/* Mark counter as free/unused */
+	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+	rvu_free_rsrc(&mcam->counters, req->cntr);
+
+	/* Disable all MCAM entry's stats which are using this counter */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+		entry = index + 1;
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (rc)
+		goto exit;
+
+	/* Unmap the MCAM entry and counter */
+	if (!req->all) {
+		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+		if (rc)
+			goto exit;
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, req->cntr);
+		goto exit;
+	}
+
+	/* Disable stats on all MCAM entries using this counter */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+		entry = index + 1;
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+			struct npc_mcam_oper_counter_req *req,
+			struct npc_mcam_oper_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+	rsp->stat &= BIT_ULL(48) - 1;
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+			  struct npc_mcam_alloc_and_write_entry_req *req,
+			  struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+	struct npc_mcam_alloc_counter_req cntr_req;
+	struct npc_mcam_alloc_counter_rsp cntr_rsp;
+	struct npc_mcam_alloc_entry_req entry_req;
+	struct npc_mcam_alloc_entry_rsp entry_rsp;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 entry = NPC_MCAM_ENTRY_INVALID;
+	u16 cntr = NPC_MCAM_ENTRY_INVALID;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Try to allocate a MCAM entry */
+	entry_req.hdr.pcifunc = req->hdr.pcifunc;
+	entry_req.contig = true;
+	entry_req.priority = req->priority;
+	entry_req.ref_entry = req->ref_entry;
+	entry_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+						   &entry_req, &entry_rsp);
+	if (rc)
+		return rc;
+
+	if (!entry_rsp.count)
+		return NPC_MCAM_ALLOC_FAILED;
+
+	entry = entry_rsp.entry;
+
+	if (!req->alloc_cntr)
+		goto write_entry;
+
+	/* Now allocate counter */
+	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+	cntr_req.contig = true;
+	cntr_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+	if (rc) {
+		/* Free allocated MCAM entry */
+		mutex_lock(&mcam->lock);
+		mcam->entry2pfvf_map[entry] = 0;
+		npc_mcam_clear_bit(mcam, entry);
+		mutex_unlock(&mcam->lock);
+		return rc;
+	}
+
+	cntr = cntr_rsp.cntr;
+
+write_entry:
+	mutex_lock(&mcam->lock);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
+	if (req->alloc_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+	mutex_unlock(&mcam->lock);
+
+	rsp->entry = entry;
+	rsp->cntr = cntr;
+
+	return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+				     struct npc_get_kex_cfg_rsp *rsp)
+{
+	int lid, lt, ld, fl;
+
+	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+	for (lid = 0; lid < NPC_MAX_LID; lid++) {
+		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+			}
+		}
+	}
+	for (ld = 0; ld < NPC_MAX_LD; ld++)
+		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++) {
+		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+					GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+					GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+		}
+	}
+	return 0;
+}
+
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, index;
+	bool enable;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (!pfvf->rxvlan)
+		return 0;
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+					 NIXLF_UCAST_ENTRY);
+	pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+	enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+			      NIX_INTF_RX, &pfvf->entry, enable);
+
+	return 0;
 }
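
The contiguous branch of rvu_mbox_handler_npc_mcam_alloc_counter() above leans on npc_mcam_find_zero_area(), which presumably behaves much like the kernel's bitmap_find_next_zero_area(): locate a run of free bits, report how much of the request could be satisfied, then mark that run as used. A minimal standalone sketch of that allocate-contiguous-or-report-best pattern (sizes and names here are illustrative, not the driver's):

	#include <stdio.h>
	#include <string.h>

	#define NCNTRS 64

	/* Return the start of the longest free run and, via *granted, how
	 * many of the 'want' requested slots that run can satisfy.
	 */
	static int find_zero_area(const unsigned char *used, int want,
				  int *granted)
	{
		int best_len = 0, best_start = 0, run = 0, start = 0;
		int i;

		for (i = 0; i <= NCNTRS; i++) {
			if (i < NCNTRS && !used[i]) {
				if (!run)
					start = i;
				run++;
			} else {
				if (run > best_len) {
					best_len = run;
					best_start = start;
				}
				run = 0;
			}
		}
		*granted = best_len < want ? best_len : want;
		return best_start;
	}

	int main(void)
	{
		unsigned char used[NCNTRS] = { 0 };
		int granted, idx, i;

		memset(used, 1, 10);	/* counters 0-9 already in use */
		idx = find_zero_area(used, 4, &granted);
		for (i = idx; i < idx + granted; i++)
			used[i] = 1;	/* __set_bit() equivalent */
		printf("allocated %d counters starting at %d\n", granted, idx);
		return 0;
	}

The non-contiguous branch is simpler: it calls rvu_alloc_rsrc() once per requested counter and stops at the first failure, returning whatever it managed to allocate.
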
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b3..c7cd008 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
 		skb_copy_hash(skb, re->skb);
-		skb->vlan_proto = re->skb->vlan_proto;
-		skb->vlan_tci = re->skb->vlan_tci;
+		__vlan_hwaccel_copy_tag(skb, re->skb);
 
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
-		re->skb->vlan_proto = 0;
-		re->skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(re->skb);
 		skb_clear_hash(re->skb);
 		re->skb->ip_summed = CHECKSUM_NONE;
 		skb_put(skb, length);
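
The sky2 hunk above replaces the open-coded vlan_proto/vlan_tci copy and clear with the __vlan_hwaccel_copy_tag()/__vlan_hwaccel_clear_tag() helpers. Their effect is roughly the sketch below; the real helpers live in include/linux/if_vlan.h and may additionally maintain a separate tag-present flag, so this is illustrative only (the _sketch names are not kernel API):

	static inline void vlan_copy_tag_sketch(struct sk_buff *dst,
						const struct sk_buff *src)
	{
		dst->vlan_proto = src->vlan_proto;
		dst->vlan_tci = src->vlan_tci;
	}

	static inline void vlan_clear_tag_sketch(struct sk_buff *skb)
	{
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;
	}
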
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7dbfdac..399f565 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
 		if (dev->phydev->asym_pause)
 			rmt_adv |= LPA_PAUSE_ASYM;
 
-		lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
+		lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 
 		if (flowctrl & FLOW_CTRL_TX)
@@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev)
 
 	phy_set_max_speed(dev->phydev, SPEED_1000);
 	phy_support_asym_pause(dev->phydev);
-	dev->phydev->advertising = dev->phydev->supported |
-				    ADVERTISED_Autoneg;
+	linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+			 dev->phydev->advertising);
 	phy_start_aneg(dev->phydev);
 
 	of_node_put(np);
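
With this change phydev->supported and phydev->advertising are ETHTOOL_LINK_MODE bitmaps rather than u32 advertisement masks, so they have to be manipulated with the linkmode helpers, which are thin wrappers around bitmap operations. A minimal sketch of the pattern, assuming include/linux/linkmode.h and the ethtool link-mode bit definitions:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = { 0, };

	linkmode_copy(adv, phydev->supported);		/* bitmap_copy() */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
	linkmode_copy(phydev->advertising, adv);
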
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index d8e9a32..db909b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -144,9 +144,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 }
 
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-			 int cq_num)
+			 int cq_num, u8 opmod)
 {
-	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
 			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
 			MLX4_CMD_WRAPPED);
 }
@@ -287,11 +287,61 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 		__mlx4_cq_free_icm(dev, cqn);
 }
 
+static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
+{
+	int entries_per_copy = PAGE_SIZE / cqe_size;
+	void *init_ents;
+	int err = 0;
+	int i;
+
+	init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!init_ents)
+		return -ENOMEM;
+
+	/* Populate a list of CQ entries to reduce the number of
+	 * copy_to_user calls. 0xcc is the initialization value
+	 * required by the FW.
+	 */
+	memset(init_ents, 0xcc, PAGE_SIZE);
+
+	if (entries_per_copy < entries) {
+		for (i = 0; i < entries / entries_per_copy; i++) {
+			err = copy_to_user(buf, init_ents, PAGE_SIZE);
+			if (err)
+				goto out;
+
+			buf += PAGE_SIZE;
+		}
+	} else {
+		err = copy_to_user(buf, init_ents, entries * cqe_size);
+	}
+
+out:
+	kfree(init_ents);
+
+	return err;
+}
+
+static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
+				  int entries,
+				  int cqe_size)
+{
+	int i;
+
+	if (buf->nbufs == 1)
+		memset(buf->direct.buf, 0xcc, entries * cqe_size);
+	else
+		for (i = 0; i < buf->npages; i++)
+			memset(buf->page_list[i].buf, 0xcc,
+			       1UL << buf->page_shift);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
 		  struct mlx4_cq *cq, unsigned vector, int collapsed,
-		  int timestamp_en)
+		  int timestamp_en, void *buf_addr, bool user_cq)
 {
+	bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
 	struct mlx4_cmd_mailbox *mailbox;
@@ -336,7 +386,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
 	cq_context->db_rec_addr     = cpu_to_be64(db_rec);
 
-	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+	if (sw_cq_init) {
+		if (user_cq) {
+			err = mlx4_init_user_cqes(buf_addr, nent,
+						  dev->caps.cqe_size);
+			if (err)
+				sw_cq_init = false;
+		} else {
+			mlx4_init_kernel_cqes(buf_addr, nent,
+					      dev->caps.cqe_size);
+		}
+	}
+
+	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
+
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	if (err)
 		goto err_radix;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1e487ac..062a88f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -143,7 +143,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
 			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
-			    cq->vector, 0, timestamp_en);
+			    cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
 	if (err)
 		goto free_eq;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db00bf1..fd09ba9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -875,7 +875,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			skb->data_len = length;
 			napi_gro_frags(&cq->napi);
 		} else {
-			skb->vlan_tci = 0;
+			__vlan_hwaccel_clear_tag(skb);
 			skb_clear_hash(skb);
 		}
 next:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9..7df728f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -166,6 +166,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[37] = "sl to vl mapping table change event support",
 		[38] = "user MAC support",
 		[39] = "Report driver version to FW support",
+		[40] = "SW CQ initialization support",
 	};
 	int i;
 
@@ -1098,6 +1099,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
 	if (field32 & (1 << 21))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
+	if (field32 & (1 << 23))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
 
 	for (i = 1; i <= dev_cap->num_ports; i++) {
 		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeb..9afdf95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -127,7 +127,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 	else
 #endif
 		if (skb_vlan_tag_present(skb))
-			up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+			up = skb_vlan_tag_get_prio(skb);
 
 	/* channel_ix can be larger than num_channels since
 	 * dev->num_real_tx_queues = num_channels * num_tc
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 8a291eb..080ddd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -80,6 +80,7 @@
 	depends on IPV6_GRE || IPV6_GRE=n
 	select GENERIC_ALLOCATOR
 	select PARMAN
+	select OBJAGG
 	select MLXFW
 	default m
 	---help---
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 785bf01..df78d23 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
 void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
 		      struct mlxsw_afk_key_info *key_info,
 		      struct mlxsw_afk_element_values *values,
-		      char *key, char *mask, int block_start, int block_end)
+		      char *key, char *mask)
 {
+	unsigned int blocks_count =
+			mlxsw_afk_key_info_blocks_count_get(key_info);
 	char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
 	char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
 	const struct mlxsw_afk_element_inst *elinst;
 	enum mlxsw_afk_element element;
 	int block_index, i;
 
-	for (i = block_start; i <= block_end; i++) {
+	for (i = 0; i < blocks_count; i++) {
 		memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
 		memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
 
@@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
 						values->storage.mask);
 		}
 
-		if (key)
-			mlxsw_afk->ops->encode_block(block_key, i, key);
-		if (mask)
-			mlxsw_afk->ops->encode_block(block_mask, i, mask);
+		mlxsw_afk->ops->encode_block(key, i, block_key);
+		mlxsw_afk->ops->encode_block(mask, i, block_mask);
 	}
 }
 EXPORT_SYMBOL(mlxsw_afk_encode);
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+		     int block_start, int block_end)
+{
+	int i;
+
+	for (i = block_start; i <= block_end; i++)
+		mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
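
mlxsw_afk_encode() now always encodes the full set of key blocks, and the new mlxsw_afk_clear() lets a caller blank selected blocks afterwards instead of encoding a sub-range. The intended usage, as in the spectrum_acl_atcam.c changes later in this patch, is roughly:

	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
	/* Zero the blocks that must not take part in this particular lookup */
	mlxsw_afk_clear(afk, key,
			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
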
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c29c045..bcd2641 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -188,7 +188,8 @@ struct mlxsw_afk;
 struct mlxsw_afk_ops {
 	const struct mlxsw_afk_block *blocks;
 	unsigned int blocks_count;
-	void (*encode_block)(char *block, int block_index, char *output);
+	void (*encode_block)(char *output, int block_index, char *block);
+	void (*clear_block)(char *output, int block_index);
 };
 
 struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@@ -228,6 +229,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
 void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
 		      struct mlxsw_afk_key_info *key_info,
 		      struct mlxsw_afk_element_values *values,
-		      char *key, char *mask, int block_start, int block_end);
+		      char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+		     int block_start, int block_end);
 
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 6d29dc4..61f897b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -16,6 +16,15 @@
 #define MLXSW_THERMAL_MAX_TEMP	110000	/* 110C */
 #define MLXSW_THERMAL_MAX_STATE	10
 #define MLXSW_THERMAL_MAX_DUTY	255
+/* Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values
+ * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the fan speed dynamic minimum. For example, a value of 14 (40%)
+ * sets the cooling levels vector to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10, which
+ * gives PWM speeds in percent of 40, 40, 40, 40, 40, 50, 60, 70, 80, 90, 100.
+ */
+#define MLXSW_THERMAL_SPEED_MIN		(MLXSW_THERMAL_MAX_STATE + 2)
+#define MLXSW_THERMAL_SPEED_MAX		(MLXSW_THERMAL_MAX_STATE * 2)
+#define MLXSW_THERMAL_SPEED_MIN_LEVEL	2		/* 20% */
 
 struct mlxsw_thermal_trip {
 	int	type;
@@ -68,6 +77,7 @@ struct mlxsw_thermal {
 	const struct mlxsw_bus_info *bus_info;
 	struct thermal_zone_device *tzdev;
 	struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+	u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
 	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
 	enum thermal_device_mode mode;
 };
@@ -285,12 +295,51 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
 	struct mlxsw_thermal *thermal = cdev->devdata;
 	struct device *dev = thermal->bus_info->dev;
 	char mfsc_pl[MLXSW_REG_MFSC_LEN];
-	int err, idx;
+	unsigned long cur_state, i;
+	int idx;
+	u8 duty;
+	int err;
 
 	idx = mlxsw_get_cooling_device_idx(thermal, cdev);
 	if (idx < 0)
 		return idx;
 
+	/* Check whether this request changes the allowed fan dynamic
+	 * minimum. If it does, update the cooling levels accordingly and,
+	 * if the current state is below the newly requested minimum state,
+	 * update the state as well. For example, if the current state is 5
+	 * and the minimal state is changed from 4 to 6, then
+	 * thermal->cooling_levels[0..5] are all changed from 4 to 6, and
+	 * state 5 (thermal->cooling_levels[5]) is overwritten.
+	 */
+	if (state >= MLXSW_THERMAL_SPEED_MIN &&
+	    state <= MLXSW_THERMAL_SPEED_MAX) {
+		state -= MLXSW_THERMAL_MAX_STATE;
+		for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
+			thermal->cooling_levels[i] = max(state, i);
+
+		mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+		err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+		if (err)
+			return err;
+
+		duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+		cur_state = mlxsw_duty_to_state(duty);
+
+		/* If the current fan state is below the requested dynamic
+		 * minimum, raise it to that minimum; otherwise leave it as is.
+		 */
+		if (state < cur_state)
+			return 0;
+
+		state = cur_state;
+	}
+
+	if (state > MLXSW_THERMAL_MAX_STATE)
+		return -EINVAL;
+
+	/* Normalize the state to the valid speed range. */
+	state = thermal->cooling_levels[state];
 	mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
 	err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
 	if (err) {
@@ -369,6 +418,11 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
 		}
 	}
 
+	/* Initialize cooling levels per PWM state. */
+	for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+		thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
+						 i);
+
 	thermal->tzdev = thermal_zone_device_register("mlxsw",
 						      MLXSW_THERMAL_NUM_TRIPS,
 						      MLXSW_THERMAL_TRIP_MASK,
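
As a concrete check of the encoding described in the comment above: a cooling state of 14 requests a dynamic minimum of 14 - MLXSW_THERMAL_MAX_STATE = 4 (40% PWM), and every cooling level becomes max(minimum, level). A standalone sketch of that computation:

	#include <stdio.h>

	#define MAX_STATE 10	/* mirrors MLXSW_THERMAL_MAX_STATE */

	int main(void)
	{
		unsigned long minimum = 14 - MAX_STATE;	/* dynamic minimum: 4 */
		unsigned long levels[MAX_STATE + 1];
		unsigned long i;

		for (i = 0; i <= MAX_STATE; i++)
			levels[i] = minimum > i ? minimum : i;	/* max() */

		for (i = 0; i <= MAX_STATE; i++)
			printf("%lu ", levels[i]);	/* 4 4 4 4 4 5 6 7 8 9 10 */
		printf("\n");
		return 0;
	}
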
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index db3d279..5c8a38f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -641,6 +641,10 @@ enum mlxsw_reg_sfn_rec_type {
 	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
 	/* Aged-out MAC address on a LAG port. */
 	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
+	/* Learned unicast tunnel record. */
+	MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD,
+	/* Aged-out unicast tunnel record. */
+	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE,
 };
 
 /* reg_sfn_rec_type
@@ -704,6 +708,66 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
 	*p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
 }
 
+/* reg_sfn_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * address of the remote VTEP.
+ * When protocol is IPv6, reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24,
+		     8, MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfn_uc_tunnel_protocol {
+	MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4,
+	MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfn_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
+		     1, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 address of the remote VTEP.
+ * When protocol is IPv6, ipv6_id to be queried from TNIPSD.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
+		     24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+enum mlxsw_reg_sfn_tunnel_port {
+	MLXSW_REG_SFN_TUNNEL_PORT_NVE,
+	MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
+	MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
+	MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_sfn_uc_tunnel_port
+ * Tunnel port.
+ * Reserved on Spectrum.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4,
+		     MLXSW_REG_SFN_REC_LEN, 0x10, false);
+
+static inline void
+mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac,
+			       u16 *p_fid, u32 *p_uip,
+			       enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto)
+{
+	u32 uip_msb, uip_lsb;
+
+	mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+	*p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+	uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index);
+	uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index);
+	*p_uip = uip_msb << 24 | uip_lsb;
+	*p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index);
+}
+
 /* SPMS - Switch Port MSTP/RSTP State Register
  * -------------------------------------------
  * Configures the spanning tree state of a physical port.
@@ -2834,8 +2898,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
 					u32 priority,
 					const char *tcam_region_info,
 					const char *key, u8 erp_id,
-					bool large_exists, u32 lkey_id,
-					u32 action_pointer)
+					u16 delta_start, u8 delta_mask,
+					u8 delta_value, bool large_exists,
+					u32 lkey_id, u32 action_pointer)
 {
 	MLXSW_REG_ZERO(ptce3, payload);
 	mlxsw_reg_ptce3_v_set(payload, valid);
@@ -2844,6 +2909,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
 	mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
 	mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
 	mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+	mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+	mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+	mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
 	mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
 	mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
 	mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
@@ -4231,8 +4299,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
 
 enum mlxsw_reg_ppcnt_grp {
 	MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+	MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
 	MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+	MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
 	MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+	MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
 	MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
 	MLXSW_REG_PPCNT_TC_CNT = 0x11,
 	MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
@@ -4247,6 +4318,7 @@ enum mlxsw_reg_ppcnt_grp {
  * 0x2: RFC 2819 Counters
  * 0x3: RFC 3635 Counters
  * 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
  * 0x8: Link Level Retransmission Counters
  * 0x10: Per Priority Counters
  * 0x11: Per Traffic Class Counters
@@ -4390,8 +4462,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
 MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
 	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
 
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
 /* Ethernet RFC 2819 Counter Group */
 
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
 /* reg_ppcnt_ether_stats_pkts64octets
  * Access: RO
  */
@@ -4452,6 +4562,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
 MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
 	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
 
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
 /* Ethernet Extended Counter Group Counters */
 
 /* reg_ppcnt_ecn_marked
@@ -4460,6 +4596,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
 MLXSW_ITEM64(reg, ppcnt, ecn_marked,
 	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
 
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+	     MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
 /* Ethernet Per Priority Group Counters */
 
 /* reg_ppcnt_rx_octets
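
Each MLXSW_ITEM64() definition above also generates a mlxsw_reg_ppcnt_<name>_get(payload) accessor for its 64-bit field; the spectrum.c tables that follow reference exactly those accessors as .getter callbacks. A hedged sketch of how one of the new groups is read out (the query/loop shape follows __mlxsw_sp_port_get_stats(); error handling omitted):

	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_DISCARD_CNT, 0);
	mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++)
		data[data_index + i] =
			mlxsw_sp_port_hw_discard_stats[i].getter(ppcnt_pl);
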
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bec9403..637e2ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1876,8 +1876,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
 
 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
 
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+	{
+		.str = "if_in_discards",
+		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
+	},
+	{
+		.str = "if_out_discards",
+		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
+	},
+	{
+		.str = "if_out_errors",
+		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
+	},
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
 	{
+		.str = "ether_stats_undersize_pkts",
+		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+	},
+	{
+		.str = "ether_stats_oversize_pkts",
+		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+	},
+	{
+		.str = "ether_stats_fragments",
+		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+	},
+	{
 		.str = "ether_pkts64octets",
 		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
 	},
@@ -1922,6 +1952,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
 	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
 
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+	{
+		.str = "dot3stats_fcs_errors",
+		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+	},
+	{
+		.str = "dot3stats_symbol_errors",
+		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+	},
+	{
+		.str = "dot3control_in_unknown_opcodes",
+		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+	},
+	{
+		.str = "dot3in_pause_frames",
+		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+	},
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+	{
+		.str = "discard_ingress_general",
+		.getter = mlxsw_reg_ppcnt_ingress_general_get,
+	},
+	{
+		.str = "discard_ingress_policy_engine",
+		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+	},
+	{
+		.str = "discard_ingress_vlan_membership",
+		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+	},
+	{
+		.str = "discard_ingress_tag_frame_type",
+		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+	},
+	{
+		.str = "discard_egress_vlan_membership",
+		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+	},
+	{
+		.str = "discard_loopback_filter",
+		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
+	},
+	{
+		.str = "discard_egress_general",
+		.getter = mlxsw_reg_ppcnt_egress_general_get,
+	},
+	{
+		.str = "discard_egress_hoq",
+		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
+	},
+	{
+		.str = "discard_egress_policy_engine",
+		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+	},
+	{
+		.str = "discard_ingress_tx_link_down",
+		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+	},
+	{
+		.str = "discard_egress_stp_filter",
+		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+	},
+	{
+		.str = "discard_egress_sll",
+		.getter = mlxsw_reg_ppcnt_egress_sll_get,
+	},
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
 	{
 		.str = "rx_octets_prio",
@@ -1974,7 +2080,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
 
 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
 					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
 					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
 					  IEEE_8021QAZ_MAX_TCS) + \
 					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
@@ -2015,12 +2124,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+
+		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
 		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
 			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
 
+		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
 			mlxsw_sp_port_get_prio_strings(&p, i);
 
@@ -2063,10 +2191,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
 		*p_hw_stats = mlxsw_sp_port_hw_stats;
 		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
 		break;
+	case MLXSW_REG_PPCNT_RFC_2863_CNT:
+		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+		break;
 	case MLXSW_REG_PPCNT_RFC_2819_CNT:
 		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
 		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
 		break;
+	case MLXSW_REG_PPCNT_RFC_3635_CNT:
+		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+		break;
+	case MLXSW_REG_PPCNT_DISCARD_CNT:
+		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+		break;
 	case MLXSW_REG_PPCNT_PRIO_CNT:
 		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
 		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2116,11 +2256,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
 				  data, data_index);
 	data_index = MLXSW_SP_PORT_HW_STATS_LEN;
 
+	/* RFC 2863 Counters */
+	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+				  data, data_index);
+	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
 	/* RFC 2819 Counters */
 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
 				  data, data_index);
 	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
 
+	/* RFC 3635 Counters */
+	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+				  data, data_index);
+	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+	/* Discard Counters */
+	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+				  data, data_index);
+	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
 	/* Per-Priority Counters */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0875a79..973a9d2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -721,6 +721,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
 			   struct tc_prio_qopt_offload *p);
 
 /* spectrum_fid.c */
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+						  u16 fid_index);
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
 struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
 						__be32 vni);
 int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
@@ -728,7 +731,7 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
 				     u32 nve_flood_index);
 void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
 bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni, int nve_ifindex);
 void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
 bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
 int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
@@ -810,6 +813,9 @@ struct mlxsw_sp_nve_params {
 extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
 extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
 
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+				    enum mlxsw_sp_l3proto proto,
+				    union mlxsw_sp_l3addr *addr);
 int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
 			      struct mlxsw_sp_fid *fid,
 			      enum mlxsw_sp_l3proto proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3..62e6cf4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
 {
 	struct mlxsw_sp_acl_atcam_region *aregion;
 	struct mlxsw_sp_acl_atcam_entry *aentry;
-	struct mlxsw_sp_acl_erp *erp;
+	struct mlxsw_sp_acl_erp_mask *erp_mask;
 
 	aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
 	aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
 
-	erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
-	if (IS_ERR(erp))
-		return PTR_ERR(erp);
-	aentry->erp = erp;
+	erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+	if (IS_ERR(erp_mask))
+		return PTR_ERR(erp_mask);
+	aentry->erp_mask = erp_mask;
 
 	return 0;
 }
@@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
 	aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
 	aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
 
-	mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+	mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
 }
 
 static const struct mlxsw_sp_acl_ctcam_region_ops
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 2dda028..e7bd873 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -14,8 +14,8 @@
 #include "spectrum_acl_tcam.h"
 #include "core_acl_flex_keys.h"
 
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START	6
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END	11
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START	0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END	5
 
 struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
 	char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
 	void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
 	struct mlxsw_sp_acl_atcam_lkey_id *
 		(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
-			       struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+			       char *enc_key, u8 erp_id);
 	void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
 			    struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
 };
@@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
 static bool
 mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
 {
-	return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+	return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
 }
 
 static int
@@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
 
 static struct mlxsw_sp_acl_atcam_lkey_id *
 mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
-				       struct mlxsw_sp_acl_rule_info *rulei,
-				       u8 erp_id)
+				       char *enc_key, u8 erp_id)
 {
 	struct mlxsw_sp_acl_atcam_region_generic *region_generic;
 
@@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
 
 static struct mlxsw_sp_acl_atcam_lkey_id *
 mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
-				    struct mlxsw_sp_acl_rule_info *rulei,
-				    u8 erp_id)
+				    char *enc_key, u8 erp_id)
 {
 	struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
 	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
 	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
 	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
 
-	mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
-			 NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
-			 MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+	memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+	mlxsw_afk_clear(afk, ht_key.enc_key,
+			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
 	ht_key.erp_id = erp_id;
 	lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
 					 mlxsw_sp_acl_atcam_lkey_id_ht_params);
@@ -379,7 +378,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 				       struct mlxsw_sp_acl_rule_info *rulei)
 {
 	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
-	u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+	u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
 	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
 	char ptce3_pl[MLXSW_REG_PTCE3_LEN];
 	u32 kvdl_index, priority;
@@ -389,7 +388,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+	lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+					    erp_id);
 	if (IS_ERR(lkey_id))
 		return PTR_ERR(lkey_id);
 	aentry->lkey_id = lkey_id;
@@ -398,6 +398,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
 			     priority, region->tcam_region_info,
 			     aentry->ht_key.enc_key, erp_id,
+			     aentry->delta_info.start,
+			     aentry->delta_info.mask,
+			     aentry->delta_info.value,
 			     refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
 			     kvdl_index);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@@ -418,12 +421,17 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
 	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
-	u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+	u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+	char *enc_key = aentry->ht_key.enc_key;
 	char ptce3_pl[MLXSW_REG_PTCE3_LEN];
 
 	mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
-			     region->tcam_region_info, aentry->ht_key.enc_key,
-			     erp_id, refcount_read(&lkey_id->refcnt) != 1,
+			     region->tcam_region_info,
+			     enc_key, erp_id,
+			     aentry->delta_info.start,
+			     aentry->delta_info.mask,
+			     aentry->delta_info.value,
+			     refcount_read(&lkey_id->refcnt) != 1,
 			     lkey_id->id, 0);
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
 	aregion->ops->lkey_id_put(aregion, lkey_id);
@@ -438,19 +446,30 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
 	char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
 	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
-	struct mlxsw_sp_acl_erp *erp;
-	unsigned int blocks_count;
+	const struct mlxsw_sp_acl_erp_delta *delta;
+	struct mlxsw_sp_acl_erp_mask *erp_mask;
 	int err;
 
-	blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
 	mlxsw_afk_encode(afk, region->key_info, &rulei->values,
-			 aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
+			 aentry->full_enc_key, mask);
 
-	erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
-	if (IS_ERR(erp))
-		return PTR_ERR(erp);
-	aentry->erp = erp;
-	aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+	erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+	if (IS_ERR(erp_mask))
+		return PTR_ERR(erp_mask);
+	aentry->erp_mask = erp_mask;
+	aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+	memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+	       sizeof(aentry->ht_key.enc_key));
+
+	/* Compute all needed delta information and clear the delta bits
+	 * from the encoded key.
+	 */
+	delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+	aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+	aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+	aentry->delta_info.value =
+		mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+	mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
 
 	/* We can't insert identical rules into the A-TCAM, so fail and
 	 * let the rule spill into C-TCAM
@@ -472,7 +491,7 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 	rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
 			       mlxsw_sp_acl_atcam_entries_ht_params);
 err_rhashtable_insert:
-	mlxsw_sp_acl_erp_put(aregion, erp);
+	mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
 	return err;
 }
 
@@ -484,7 +503,7 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
 	rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
 			       mlxsw_sp_acl_atcam_entries_ht_params);
-	mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+	mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
 }
 
 int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
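
The delta information computed above describes the short window in which two aggregated masks differ: mlxsw_sp_acl_erp_delta_start()/_mask() locate that window, _value() extracts the corresponding key bits, and _clear() zeroes them in the stored encoded key so that near-identical masks can share an A-TCAM eRP. Conceptually it amounts to the following much simplified, byte-aligned sketch; the real accessors in spectrum_acl_erp.c handle arbitrary bit offsets and the MSB-first key layout, so treat this as illustrative only:

	/* Illustrative only: an 8-bit delta window starting at bit 'start' */
	u8 value = (enc_key[start / 8] >> (start % 8)) & delta_mask;

	enc_key[start / 8] &= ~(delta_mask << (start % 8));
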
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8..f3e834b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_region *region = cregion->region;
 	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
 	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-	unsigned int blocks_count;
 	char *act_set;
 	u32 priority;
 	char *mask;
@@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 			     centry->parman_item.index, priority);
 	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
 	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
-	blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
-	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
-			 blocks_count - 1);
+	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
 
 	err = cregion->ops->entry_insert(cregion, centry, mask);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 0a4fd3c..d9a4b7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,7 +7,7 @@
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/rhashtable.h>
+#include <linux/objagg.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
@@ -29,6 +29,8 @@ struct mlxsw_sp_acl_erp_core {
 
 struct mlxsw_sp_acl_erp_key {
 	char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
 	bool ctcam;
 };
 
@@ -36,10 +38,8 @@ struct mlxsw_sp_acl_erp {
 	struct mlxsw_sp_acl_erp_key key;
 	u8 id;
 	u8 index;
-	refcount_t refcnt;
 	DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
 	struct list_head list;
-	struct rhash_head ht_node;
 	struct mlxsw_sp_acl_erp_table *erp_table;
 };
 
@@ -53,7 +53,6 @@ struct mlxsw_sp_acl_erp_table {
 	DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
 	DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
 	struct list_head atcam_erps_list;
-	struct rhashtable erp_ht;
 	struct mlxsw_sp_acl_erp_core *erp_core;
 	struct mlxsw_sp_acl_atcam_region *aregion;
 	const struct mlxsw_sp_acl_erp_table_ops *ops;
@@ -61,12 +60,8 @@ struct mlxsw_sp_acl_erp_table {
 	unsigned int num_atcam_erps;
 	unsigned int num_max_atcam_erps;
 	unsigned int num_ctcam_erps;
-};
-
-static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
-	.key_len = sizeof(struct mlxsw_sp_acl_erp_key),
-	.key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
-	.head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+	unsigned int num_deltas;
+	struct objagg *objagg;
 };
 
 struct mlxsw_sp_acl_erp_table_ops {
@@ -119,16 +114,6 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
 	.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
 };
 
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
-{
-	return erp->key.ctcam;
-}
-
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
-{
-	return erp->id;
-}
-
 static unsigned int
 mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
 {
@@ -194,12 +179,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
 
 static int
 mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
-				 const struct mlxsw_sp_acl_erp *erp)
+				 struct mlxsw_sp_acl_erp_key *key)
 {
+	DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
 	unsigned long bit;
 	int err;
 
-	for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+	bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+			  MLXSW_SP_ACL_TCAM_MASK_LEN);
+	for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
 		mlxsw_sp_acl_erp_master_mask_bit_set(bit,
 						     &erp_table->master_mask);
 
@@ -210,7 +198,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
 	return 0;
 
 err_master_mask_update:
-	for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+	for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
 		mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
 						       &erp_table->master_mask);
 	return err;
@@ -218,12 +206,15 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
 
 static int
 mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
-				   const struct mlxsw_sp_acl_erp *erp)
+				   struct mlxsw_sp_acl_erp_key *key)
 {
+	DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
 	unsigned long bit;
 	int err;
 
-	for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+	bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+			  MLXSW_SP_ACL_TCAM_MASK_LEN);
+	for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
 		mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
 						       &erp_table->master_mask);
 
@@ -234,7 +225,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
 	return 0;
 
 err_master_mask_update:
-	for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+	for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
 		mlxsw_sp_acl_erp_master_mask_bit_set(bit,
 						     &erp_table->master_mask);
 	return err;
@@ -256,26 +247,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
 		goto err_erp_id_get;
 
 	memcpy(&erp->key, key, sizeof(*key));
-	bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
-			  MLXSW_SP_ACL_TCAM_MASK_LEN);
 	list_add(&erp->list, &erp_table->atcam_erps_list);
-	refcount_set(&erp->refcnt, 1);
 	erp_table->num_atcam_erps++;
 	erp->erp_table = erp_table;
 
-	err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+	err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
 	if (err)
 		goto err_master_mask_set;
 
-	err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
-				     mlxsw_sp_acl_erp_ht_params);
-	if (err)
-		goto err_rhashtable_insert;
-
 	return erp;
 
-err_rhashtable_insert:
-	mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
 err_master_mask_set:
 	erp_table->num_atcam_erps--;
 	list_del(&erp->list);
@@ -290,9 +271,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
 {
 	struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
 
-	rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
-			       mlxsw_sp_acl_erp_ht_params);
-	mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+	mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
 	erp_table->num_atcam_erps--;
 	list_del(&erp->list);
 	mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@@ -647,9 +626,56 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
 	mlxsw_sp_acl_erp_table_enable(erp_table, false);
 }
 
-static void
-mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+				   unsigned int *inc_num)
 {
+	int err;
+
+	/* If there are C-TCAM eRPs or deltas in use, the region needs to
+	 * transition to using the eRP table, if that has not been done yet.
+	 */
+	if (erp_table->ops != &erp_two_masks_ops &&
+	    erp_table->ops != &erp_multiple_masks_ops) {
+		err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+		if (err)
+			return err;
+	}
+
+	/* When C-TCAM or deltas are used, the eRP table must be used */
+	if (erp_table->ops != &erp_multiple_masks_ops)
+		erp_table->ops = &erp_multiple_masks_ops;
+
+	(*inc_num)++;
+
+	return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+	return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+						  &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+	return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+						  &erp_table->num_deltas);
+}
+
+static void
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+				   unsigned int *dec_num)
+{
+	(*dec_num)--;
+
+	/* If there are no C-TCAM eRP or deltas in use, the state we
+	 * transition to depends on the number of A-TCAM eRPs currently
+	 * in use.
+	 */
+	if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+		return;
+
 	switch (erp_table->num_atcam_erps) {
 	case 2:
 		/* Keep using the eRP table, but correctly set the
@@ -683,9 +709,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
 	}
 }
 
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+	__mlxsw_sp_acl_erp_table_other_dec(erp_table,
+					   &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+	__mlxsw_sp_acl_erp_table_other_dec(erp_table,
+					   &erp_table->num_deltas);
+}
+
 static struct mlxsw_sp_acl_erp *
-__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
-				     struct mlxsw_sp_acl_erp_key *key)
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+				   struct mlxsw_sp_acl_erp_key *key)
 {
 	struct mlxsw_sp_acl_erp *erp;
 	int err;
@@ -697,89 +735,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
 	memcpy(&erp->key, key, sizeof(*key));
 	bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
 			  MLXSW_SP_ACL_TCAM_MASK_LEN);
-	refcount_set(&erp->refcnt, 1);
-	erp_table->num_ctcam_erps++;
+
+	err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
+	if (err)
+		goto err_erp_ctcam_inc;
+
 	erp->erp_table = erp_table;
 
-	err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+	err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
 	if (err)
 		goto err_master_mask_set;
 
-	err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
-				     mlxsw_sp_acl_erp_ht_params);
-	if (err)
-		goto err_rhashtable_insert;
-
 	err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
 	if (err)
 		goto err_erp_region_ctcam_enable;
 
-	/* When C-TCAM is used, the eRP table must be used */
-	erp_table->ops = &erp_multiple_masks_ops;
-
 	return erp;
 
 err_erp_region_ctcam_enable:
-	rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
-			       mlxsw_sp_acl_erp_ht_params);
-err_rhashtable_insert:
-	mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+	mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
 err_master_mask_set:
-	erp_table->num_ctcam_erps--;
+	mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
 	kfree(erp);
 	return ERR_PTR(err);
 }
 
-static struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
-				   struct mlxsw_sp_acl_erp_key *key)
-{
-	struct mlxsw_sp_acl_erp *erp;
-	int err;
-
-	/* There is a special situation where we need to spill rules
-	 * into the C-TCAM, yet the region is still using a master
-	 * mask and thus not performing a lookup in the C-TCAM. This
-	 * can happen when two rules that only differ in priority - and
-	 * thus sharing the same key - are programmed. In this case
-	 * we transition the region to use an eRP table
-	 */
-	err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
-	if (err)
-		return ERR_PTR(err);
-
-	erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
-	if (IS_ERR(erp)) {
-		err = PTR_ERR(erp);
-		goto err_erp_create;
-	}
-
-	return erp;
-
-err_erp_create:
-	mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
-	return ERR_PTR(err);
-}
-
 static void
 mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
 {
 	struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
 
 	mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
-	rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
-			       mlxsw_sp_acl_erp_ht_params);
-	mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
-	erp_table->num_ctcam_erps--;
+	mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+	mlxsw_sp_acl_erp_ctcam_dec(erp_table);
 	kfree(erp);
-
-	/* Once the last C-TCAM eRP was destroyed, the state we
-	 * transition to depends on the number of A-TCAM eRPs currently
-	 * in use
-	 */
-	if (erp_table->num_ctcam_erps > 0)
-		return;
-	mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
 }
 
 static struct mlxsw_sp_acl_erp *
@@ -790,7 +780,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
 	int err;
 
 	if (key->ctcam)
-		return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+		return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
 
 	/* Expand the eRP table for the new eRP, if needed */
 	err = mlxsw_sp_acl_erp_table_expand(erp_table);
@@ -838,7 +828,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
 	mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
 	mlxsw_sp_acl_erp_generic_destroy(erp);
 
-	if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+	if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+	    erp_table->num_deltas == 0)
 		erp_table->ops = &erp_two_masks_ops;
 }
 
@@ -940,13 +931,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
 	WARN_ON(1);
 }
 
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
-		     const char *mask, bool ctcam)
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+			  const char *mask, bool ctcam)
 {
-	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
 	struct mlxsw_sp_acl_erp_key key;
-	struct mlxsw_sp_acl_erp *erp;
+	struct objagg_obj *objagg_obj;
 
 	/* eRPs are allocated from a shared resource, but currently all
 	 * allocations are done under RTNL.
@@ -955,29 +945,238 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
 
 	memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
 	key.ctcam = ctcam;
-	erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
-				     mlxsw_sp_acl_erp_ht_params);
-	if (erp) {
-		refcount_inc(&erp->refcnt);
-		return erp;
-	}
-
-	return erp_table->ops->erp_create(erp_table, &key);
+	objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+	if (IS_ERR(objagg_obj))
+		return ERR_CAST(objagg_obj);
+	return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
 }
 
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
-			  struct mlxsw_sp_acl_erp *erp)
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+			       struct mlxsw_sp_acl_erp_mask *erp_mask)
 {
-	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+	struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
 
 	ASSERT_RTNL();
-
-	if (!refcount_dec_and_test(&erp->refcnt))
-		return;
-
-	erp_table->ops->erp_destroy(erp_table, erp);
+	objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
 }
 
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+	struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+	const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+	return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+	struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+	const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+	return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+	struct mlxsw_sp_acl_erp_key key;
+	u16 start;
+	u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+	return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+	return delta->mask;
+}
+
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+				const char *enc_key)
+{
+	u16 start = delta->start;
+	u8 mask = delta->mask;
+	u16 tmp;
+
+	if (!mask)
+		return 0;
+
+	tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+	if (start / 8 + 1 < __MASK_LEN)
+		tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+	tmp >>= start % 8;
+	tmp &= mask;
+	return tmp;
+}
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+				  const char *enc_key)
+{
+	u16 start = delta->start;
+	u8 mask = delta->mask;
+	unsigned char *byte;
+	u16 tmp;
+
+	tmp = mask;
+	tmp <<= start % 8;
+	tmp = ~tmp;
+
+	byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+	*byte &= tmp & 0xff;
+	if (start / 8 + 1 < __MASK_LEN) {
+		byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+		*byte &= (tmp >> 8) & 0xff;
+	}
+}
+
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
+
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+	struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+	const struct mlxsw_sp_acl_erp_delta *delta;
+
+	delta = objagg_obj_delta_priv(objagg_obj);
+	if (!delta)
+		delta = &mlxsw_sp_acl_erp_delta_default;
+	return delta;
+}
+
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+			    const struct mlxsw_sp_acl_erp_key *key,
+			    u16 *delta_start, u8 *delta_mask)
+{
+	int offset = 0;
+	int si = -1;
+	u16 pmask;
+	u16 mask;
+	int i;
+
+	/* The difference between 2 masks can be up to 8 consecutive bits. */
+	for (i = 0; i < __MASK_LEN; i++) {
+		if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+			continue;
+		if (si == -1)
+			si = i;
+		else if (si != i - 1)
+			return -EINVAL;
+	}
+	if (si == -1) {
+		/* The masks are the same; this cannot happen.
+		 * That means the caller is broken.
+		 */
+		WARN_ON(1);
+		*delta_start = 0;
+		*delta_mask = 0;
+		return 0;
+	}
+	pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+	mask = (unsigned char) key->mask[__MASK_IDX(si)];
+	if (si + 1 < __MASK_LEN) {
+		pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+		mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+	}
+
+	if ((pmask ^ mask) & pmask)
+		return -EINVAL;
+	mask &= ~pmask;
+	while (!(mask & (1 << offset)))
+		offset++;
+	while (!(mask & 1))
+		mask >>= 1;
+	if (mask & 0xff00)
+		return -EINVAL;
+
+	*delta_start = si * 8 + offset;
+	*delta_mask = mask;
+
+	return 0;
+}
+
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+					   void *obj)
+{
+	struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+	struct mlxsw_sp_acl_atcam_region *aregion = priv;
+	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+	struct mlxsw_sp_acl_erp_key *key = obj;
+	struct mlxsw_sp_acl_erp_delta *delta;
+	u16 delta_start;
+	u8 delta_mask;
+	int err;
+
+	if (parent_key->ctcam || key->ctcam)
+		return ERR_PTR(-EINVAL);
+	err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+					  &delta_start, &delta_mask);
+	if (err)
+		return ERR_PTR(-EINVAL);
+
+	delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+	if (!delta)
+		return ERR_PTR(-ENOMEM);
+	delta->start = delta_start;
+	delta->mask = delta_mask;
+
+	err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+	if (err)
+		goto err_erp_delta_inc;
+
+	memcpy(&delta->key, key, sizeof(*key));
+	err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+	if (err)
+		goto err_master_mask_set;
+
+	return delta;
+
+err_master_mask_set:
+	mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+	kfree(delta);
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+	struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+	struct mlxsw_sp_acl_atcam_region *aregion = priv;
+	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+	mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+	mlxsw_sp_acl_erp_delta_dec(erp_table);
+	kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+{
+	struct mlxsw_sp_acl_atcam_region *aregion = priv;
+	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+	struct mlxsw_sp_acl_erp_key *key = obj;
+
+	return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+	struct mlxsw_sp_acl_atcam_region *aregion = priv;
+	struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+	erp_table->ops->erp_destroy(erp_table, root_priv);
+}
+
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+	.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+	.delta_create = mlxsw_sp_acl_erp_delta_create,
+	.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+	.root_create = mlxsw_sp_acl_erp_root_create,
+	.root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
+
 static struct mlxsw_sp_acl_erp_table *
 mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
 {
@@ -988,9 +1187,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
 	if (!erp_table)
 		return ERR_PTR(-ENOMEM);
 
-	err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
-	if (err)
-		goto err_rhashtable_init;
+	erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+					  aregion);
+	if (IS_ERR(erp_table->objagg)) {
+		err = PTR_ERR(erp_table->objagg);
+		goto err_objagg_create;
+	}
 
 	erp_table->erp_core = aregion->atcam->erp_core;
 	erp_table->ops = &erp_no_mask_ops;
@@ -999,7 +1201,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
 
 	return erp_table;
 
-err_rhashtable_init:
+err_objagg_create:
 	kfree(erp_table);
 	return ERR_PTR(err);
 }
@@ -1008,7 +1210,7 @@ static void
 mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
 {
 	WARN_ON(!list_empty(&erp_table->atcam_erps_list));
-	rhashtable_destroy(&erp_table->erp_ht);
+	objagg_destroy(erp_table->objagg);
 	kfree(erp_table);
 }
 
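A standalone sketch (not driver code) of the arithmetic behind the new
mlxsw_sp_acl_erp_delta_fill() above: given a parent mask and a child mask that
only adds up to 8 consecutive extra bits (spanning at most two adjacent
bytes), derive the delta start bit and the delta mask. The driver additionally
remaps byte indices through __MASK_IDX(); that detail is omitted here, and
identical masks are simply treated as an error.

#include <stdint.h>
#include <stdio.h>

static int delta_fill(const uint8_t *pmask, const uint8_t *mask, int len,
		      uint16_t *delta_start, uint8_t *delta_mask)
{
	int si = -1, offset = 0, i;
	uint16_t p, m;

	/* Find the (at most two adjacent) bytes where the masks differ. */
	for (i = 0; i < len; i++) {
		if (pmask[i] == mask[i])
			continue;
		if (si == -1)
			si = i;
		else if (si != i - 1)
			return -1;	/* difference spans non-adjacent bytes */
	}
	if (si == -1)
		return -1;		/* masks are identical */

	p = pmask[si];
	m = mask[si];
	if (si + 1 < len) {
		p |= (uint16_t)pmask[si + 1] << 8;
		m |= (uint16_t)mask[si + 1] << 8;
	}

	if ((p ^ m) & p)
		return -1;		/* child mask must cover the parent mask */
	m &= ~p;			/* keep only the extra (delta) bits */
	while (!(m & (1u << offset)))
		offset++;
	m >>= offset;
	if (m & 0xff00)
		return -1;		/* more than 8 bits of delta */

	*delta_start = si * 8 + offset;
	*delta_mask = m;
	return 0;
}

int main(void)
{
	uint8_t parent[2] = { 0xf0, 0x00 };
	uint8_t child[2]  = { 0xf0, 0x03 };	/* adds two bits at offset 8 */
	uint16_t start;
	uint8_t mask;

	if (!delta_fill(parent, child, 2, &start, &mask))
		printf("delta start=%u mask=0x%02x\n",
		       (unsigned int)start, (unsigned int)mask);
	return 0;
}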
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index d409b09..2e1e8c4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
 
 #define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
 
-static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
-				       char *output)
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+				       char *block)
 {
 	unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
 	char *output_indexed = output + offset;
@@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
 	memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
 }
 
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+	unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+	char *output_indexed = output + offset;
+
+	memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
 const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
 	.blocks		= mlxsw_sp1_afk_blocks,
 	.blocks_count	= ARRAY_SIZE(mlxsw_sp1_afk_blocks),
 	.encode_block	= mlxsw_sp1_afk_encode_block,
+	.clear_block	= mlxsw_sp1_afk_clear_block,
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@@ -263,10 +272,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
 	MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
 };
 
-static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
-				       char *output)
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+					    u64 block_value)
 {
-	u64 block_value = mlxsw_sp2_afk_block_value_get(block);
 	const struct mlxsw_sp2_afk_block_layout *block_layout;
 
 	if (WARN_ON(block_index < 0 ||
@@ -278,8 +286,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
 			   &block_layout->item, 0, block_value);
 }
 
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+				       char *block)
+{
+	u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+	__mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+	__mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
 const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
 	.blocks		= mlxsw_sp2_afk_blocks,
 	.blocks_count	= ARRAY_SIZE(mlxsw_sp2_afk_blocks),
 	.encode_block	= mlxsw_sp2_afk_encode_block,
+	.clear_block	= mlxsw_sp2_afk_clear_block,
 };
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 219a4e2..9a73759 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -154,7 +154,9 @@ struct mlxsw_sp_acl_atcam_region {
 };
 
 struct mlxsw_sp_acl_atcam_entry_ht_key {
-	char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+	char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+							    * minus delta bits.
+							    */
 	u8 erp_id;
 };
 
@@ -165,9 +167,15 @@ struct mlxsw_sp_acl_atcam_chunk {
 struct mlxsw_sp_acl_atcam_entry {
 	struct rhash_head ht_node;
 	struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+	char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+	struct {
+		u16 start;
+		u8 mask;
+		u8 value;
+	} delta_info;
 	struct mlxsw_sp_acl_ctcam_entry centry;
 	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
-	struct mlxsw_sp_acl_erp *erp;
+	struct mlxsw_sp_acl_erp_mask *erp_mask;
 };
 
 static inline struct mlxsw_sp_acl_atcam_region *
@@ -209,15 +217,27 @@ int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
 void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp_acl_atcam *atcam);
 
-struct mlxsw_sp_acl_erp;
+struct mlxsw_sp_acl_erp_delta;
 
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
-		     const char *mask, bool ctcam);
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
-			  struct mlxsw_sp_acl_erp *erp);
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+				const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+				  const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+			  const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+			       struct mlxsw_sp_acl_erp_mask *erp_mask);
 int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
 void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
 int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
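The delta accessors declared above, mlxsw_sp_acl_erp_delta_value() and
mlxsw_sp_acl_erp_delta_clear(), read and clear delta bits that start at an
arbitrary bit offset and may straddle two bytes of the encoded key. A
simplified standalone sketch of that bit manipulation, again ignoring the
__MASK_IDX() byte remapping used by the driver:

#include <stdint.h>
#include <stdio.h>

static uint8_t delta_value(const uint8_t *key, int key_len,
			   uint16_t start, uint8_t mask)
{
	uint16_t tmp;

	if (!mask)
		return 0;

	tmp = key[start / 8];
	if (start / 8 + 1 < key_len)
		tmp |= (uint16_t)key[start / 8 + 1] << 8;
	tmp >>= start % 8;
	return tmp & mask;
}

static void delta_clear(uint8_t *key, int key_len, uint16_t start, uint8_t mask)
{
	uint16_t tmp = (uint16_t)mask << (start % 8);

	key[start / 8] &= ~tmp & 0xff;
	if (start / 8 + 1 < key_len)
		key[start / 8 + 1] &= ~(tmp >> 8) & 0xff;
}

int main(void)
{
	uint8_t key[2] = { 0xab, 0x0d };	/* arbitrary encoded-key bytes */

	/* A 4-bit delta starting at bit 6, i.e. straddling both bytes. */
	printf("value=0x%x\n", (unsigned int)delta_value(key, 2, 6, 0x0f));
	delta_clear(key, 2, 6, 0x0f);
	printf("after clear: %02x %02x\n",
	       (unsigned int)key[0], (unsigned int)key[1]);
	return 0;
}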
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index a3db033..71b2d20 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -15,6 +15,7 @@
 struct mlxsw_sp_fid_family;
 
 struct mlxsw_sp_fid_core {
+	struct rhashtable fid_ht;
 	struct rhashtable vni_ht;
 	struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
 	unsigned int *port_fid_mappings;
@@ -26,10 +27,12 @@ struct mlxsw_sp_fid {
 	unsigned int ref_count;
 	u16 fid_index;
 	struct mlxsw_sp_fid_family *fid_family;
+	struct rhash_head ht_node;
 
 	struct rhash_head vni_ht_node;
 	__be32 vni;
 	u32 nve_flood_index;
+	int nve_ifindex;
 	u8 vni_valid:1,
 	   nve_flood_index_valid:1;
 };
@@ -44,6 +47,12 @@ struct mlxsw_sp_fid_8021d {
 	int br_ifindex;
 };
 
+static const struct rhashtable_params mlxsw_sp_fid_ht_params = {
+	.key_len = sizeof_field(struct mlxsw_sp_fid, fid_index),
+	.key_offset = offsetof(struct mlxsw_sp_fid, fid_index),
+	.head_offset = offsetof(struct mlxsw_sp_fid, ht_node),
+};
+
 static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
 	.key_len = sizeof_field(struct mlxsw_sp_fid, vni),
 	.key_offset = offsetof(struct mlxsw_sp_fid, vni),
@@ -113,6 +122,29 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
 	[MLXSW_SP_FLOOD_TYPE_MC]	= mlxsw_sp_sfgc_mc_packet_types,
 };
 
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+						  u16 fid_index)
+{
+	struct mlxsw_sp_fid *fid;
+
+	fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
+				     mlxsw_sp_fid_ht_params);
+	if (fid)
+		fid->ref_count++;
+
+	return fid;
+}
+
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex)
+{
+	if (!fid->vni_valid)
+		return -EINVAL;
+
+	*nve_ifindex = fid->nve_ifindex;
+
+	return 0;
+}
+
 struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
 						__be32 vni)
 {
@@ -173,7 +205,7 @@ bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
 	return fid->nve_flood_index_valid;
 }
 
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni, int nve_ifindex)
 {
 	struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
 	const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
@@ -183,6 +215,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
 	if (WARN_ON(!ops->vni_set || fid->vni_valid))
 		return -EINVAL;
 
+	fid->nve_ifindex = nve_ifindex;
 	fid->vni = vni;
 	err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
 					    &fid->vni_ht_node,
@@ -944,10 +977,17 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_configure;
 
+	err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node,
+				     mlxsw_sp_fid_ht_params);
+	if (err)
+		goto err_rhashtable_insert;
+
 	list_add(&fid->list, &fid_family->fids_list);
 	fid->ref_count++;
 	return fid;
 
+err_rhashtable_insert:
+	fid->fid_family->ops->deconfigure(fid);
 err_configure:
 	__clear_bit(fid_index - fid_family->start_index,
 		    fid_family->fids_bitmap);
@@ -959,6 +999,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
 void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
 {
 	struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+	struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
 
 	if (--fid->ref_count == 1 && fid->rif) {
 		/* Destroy the associated RIF and let it drop the last
@@ -967,6 +1008,8 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
 		return mlxsw_sp_rif_destroy(fid->rif);
 	} else if (fid->ref_count == 0) {
 		list_del(&fid->list);
+		rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht,
+				       &fid->ht_node, mlxsw_sp_fid_ht_params);
 		fid->fid_family->ops->deconfigure(fid);
 		__clear_bit(fid->fid_index - fid_family->start_index,
 			    fid_family->fids_bitmap);
@@ -1126,9 +1169,13 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
 		return -ENOMEM;
 	mlxsw_sp->fid_core = fid_core;
 
+	err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params);
+	if (err)
+		goto err_rhashtable_fid_init;
+
 	err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
 	if (err)
-		goto err_rhashtable_init;
+		goto err_rhashtable_vni_init;
 
 	fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
 					      GFP_KERNEL);
@@ -1157,7 +1204,9 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
 	kfree(fid_core->port_fid_mappings);
 err_alloc_port_fid_mappings:
 	rhashtable_destroy(&fid_core->vni_ht);
-err_rhashtable_init:
+err_rhashtable_vni_init:
+	rhashtable_destroy(&fid_core->fid_ht);
+err_rhashtable_fid_init:
 	kfree(fid_core);
 	return err;
 }
@@ -1172,5 +1221,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
 					       fid_core->fid_family_arr[i]);
 	kfree(fid_core->port_fid_mappings);
 	rhashtable_destroy(&fid_core->vni_ht);
+	rhashtable_destroy(&fid_core->fid_ht);
 	kfree(fid_core);
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index ad06d99..c4d5a08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -174,6 +174,20 @@ mlxsw_sp_nve_mc_record_ops_arr[] = {
 	[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
 };
 
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+				    enum mlxsw_sp_l3proto proto,
+				    union mlxsw_sp_l3addr *addr)
+{
+	switch (proto) {
+	case MLXSW_SP_L3_PROTO_IPV4:
+		addr->addr4 = cpu_to_be32(uip);
+		return 0;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
 static struct mlxsw_sp_nve_mc_list *
 mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
 			  const struct mlxsw_sp_nve_mc_list_key *key)
@@ -803,7 +817,7 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
 		return err;
 	}
 
-	err = mlxsw_sp_fid_vni_set(fid, params->vni);
+	err = mlxsw_sp_fid_vni_set(fid, params->vni, params->dev->ifindex);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
 		goto err_fid_vni_set;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d21c7be..4e9cc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -17,7 +17,8 @@
 #define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
 #define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
 
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS	VXLAN_F_UDP_ZERO_CSUM_TX
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS	(VXLAN_F_UDP_ZERO_CSUM_TX | \
+						 VXLAN_F_LEARN)
 
 static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
 					    const struct net_device *dev,
@@ -61,11 +62,6 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
 		return false;
 	}
 
-	if (cfg->flags & VXLAN_F_LEARN) {
-		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
-		return false;
-	}
-
 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
 		return false;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 739a51f..73e5db1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1968,8 +1968,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
 	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
 	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
-	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
-	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
 };
 
 static int
@@ -2312,6 +2310,71 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
 }
 
 static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+				      enum mlxsw_sp_l3proto *proto,
+				      union mlxsw_sp_l3addr *addr)
+{
+	if (vxlan_addr->sa.sa_family == AF_INET) {
+		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+		*proto = MLXSW_SP_L3_PROTO_IPV4;
+	} else {
+		addr->addr6 = vxlan_addr->sin6.sin6_addr;
+		*proto = MLXSW_SP_L3_PROTO_IPV6;
+	}
+}
+
+static void
+mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
+				      const union mlxsw_sp_l3addr *addr,
+				      union vxlan_addr *vxlan_addr)
+{
+	switch (proto) {
+	case MLXSW_SP_L3_PROTO_IPV4:
+		vxlan_addr->sa.sa_family = AF_INET;
+		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
+		break;
+	case MLXSW_SP_L3_PROTO_IPV6:
+		vxlan_addr->sa.sa_family = AF_INET6;
+		vxlan_addr->sin6.sin6_addr = addr->addr6;
+		break;
+	}
+}
+
+static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
+					      const char *mac,
+					      enum mlxsw_sp_l3proto proto,
+					      union mlxsw_sp_l3addr *addr,
+					      __be32 vni, bool adding)
+{
+	struct switchdev_notifier_vxlan_fdb_info info;
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	enum switchdev_notifier_type type;
+
+	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
+			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
+	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
+	info.remote_port = vxlan->cfg.dst_port;
+	info.remote_vni = vni;
+	info.remote_ifindex = 0;
+	ether_addr_copy(info.eth_addr, mac);
+	info.vni = vni;
+	info.offloaded = adding;
+	call_switchdev_notifiers(type, dev, &info.info);
+}
+
+static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
+					    const char *mac,
+					    enum mlxsw_sp_l3proto proto,
+					    union mlxsw_sp_l3addr *addr,
+					    __be32 vni,
+					    bool adding)
+{
+	if (netif_is_vxlan(dev))
+		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
+						  adding);
+}
+
+static void
 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
 			    const char *mac, u16 vid,
 			    struct net_device *dev, bool offloaded)
@@ -2442,6 +2505,122 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
 	goto do_fdb_op;
 }
 
+static int
+__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+					    const struct mlxsw_sp_fid *fid,
+					    bool adding,
+					    struct net_device **nve_dev,
+					    u16 *p_vid, __be32 *p_vni)
+{
+	struct mlxsw_sp_bridge_device *bridge_device;
+	struct net_device *br_dev, *dev;
+	int nve_ifindex;
+	int err;
+
+	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_fid_vni(fid, p_vni);
+	if (err)
+		return err;
+
+	dev = __dev_get_by_index(&init_net, nve_ifindex);
+	if (!dev)
+		return -EINVAL;
+	*nve_dev = dev;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
+		return -EINVAL;
+
+	if (adding && netif_is_vxlan(dev)) {
+		struct vxlan_dev *vxlan = netdev_priv(dev);
+
+		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
+			return -EINVAL;
+	}
+
+	br_dev = netdev_master_upper_dev_get(dev);
+	if (!br_dev)
+		return -EINVAL;
+
+	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+	if (!bridge_device)
+		return -EINVAL;
+
+	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
+
+	return 0;
+}
+
+static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+						      char *sfn_pl,
+						      int rec_index,
+						      bool adding)
+{
+	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
+	enum switchdev_notifier_type type;
+	struct net_device *nve_dev;
+	union mlxsw_sp_l3addr addr;
+	struct mlxsw_sp_fid *fid;
+	char mac[ETH_ALEN];
+	u16 fid_index, vid;
+	__be32 vni;
+	u32 uip;
+	int err;
+
+	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
+				       &uip, &sfn_proto);
+
+	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
+	if (!fid)
+		goto err_fid_lookup;
+
+	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
+					      (enum mlxsw_sp_l3proto) sfn_proto,
+					      &addr);
+	if (err)
+		goto err_ip_resolve;
+
+	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
+							  &nve_dev, &vid, &vni);
+	if (err)
+		goto err_fdb_process;
+
+	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+					     (enum mlxsw_sp_l3proto) sfn_proto,
+					     &addr, adding, true);
+	if (err)
+		goto err_fdb_op;
+
+	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
+					(enum mlxsw_sp_l3proto) sfn_proto,
+					&addr, vni, adding);
+
+	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+			SWITCHDEV_FDB_DEL_TO_BRIDGE;
+	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+
+	mlxsw_sp_fid_put(fid);
+
+	return;
+
+err_fdb_op:
+err_fdb_process:
+err_ip_resolve:
+	mlxsw_sp_fid_put(fid);
+err_fid_lookup:
+	/* Remove an FDB entry in case we cannot process it. Otherwise the
+	 * device will keep sending the same notification over and over again.
+	 */
+	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
+				       false, true);
+}
+
 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
 					    char *sfn_pl, int rec_index)
 {
@@ -2462,6 +2641,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
 						    rec_index, false);
 		break;
+	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
+		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+							  rec_index, true);
+		break;
+	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
+		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+							  rec_index, false);
+		break;
 	}
 }
 
@@ -2517,20 +2704,6 @@ struct mlxsw_sp_switchdev_event_work {
 };
 
 static void
-mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
-				      enum mlxsw_sp_l3proto *proto,
-				      union mlxsw_sp_l3addr *addr)
-{
-	if (vxlan_addr->sa.sa_family == AF_INET) {
-		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
-		*proto = MLXSW_SP_L3_PROTO_IPV4;
-	} else {
-		addr->addr6 = vxlan_addr->sin6.sin6_addr;
-		*proto = MLXSW_SP_L3_PROTO_IPV6;
-	}
-}
-
-static void
 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
 					  struct mlxsw_sp_switchdev_event_work *
 					  switchdev_work,
@@ -2595,7 +2768,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
 		return;
 
-	if (!switchdev_work->fdb_info.added_by_user)
+	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
+	    !switchdev_work->fdb_info.added_by_user)
 		return;
 
 	if (!netif_running(dev))
@@ -2942,6 +3116,32 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = {
 	.notifier_call = mlxsw_sp_switchdev_event,
 };
 
+static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
+					     unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+	int err;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD:
+		err = switchdev_handle_port_obj_add(dev, ptr,
+						    mlxsw_sp_port_dev_check,
+						    mlxsw_sp_port_obj_add);
+		return notifier_from_errno(err);
+	case SWITCHDEV_PORT_OBJ_DEL:
+		err = switchdev_handle_port_obj_del(dev, ptr,
+						    mlxsw_sp_port_dev_check,
+						    mlxsw_sp_port_obj_del);
+		return notifier_from_errno(err);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
+	.notifier_call = mlxsw_sp_switchdev_blocking_event,
+};
+
 u8
 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
 {
@@ -2951,6 +3151,7 @@ mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+	struct notifier_block *nb;
 	int err;
 
 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -2965,17 +3166,33 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
 		return err;
 	}
 
+	nb = &mlxsw_sp_switchdev_blocking_notifier;
+	err = register_switchdev_blocking_notifier(nb);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
+		goto err_register_switchdev_blocking_notifier;
+	}
+
 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
 	return 0;
+
+err_register_switchdev_blocking_notifier:
+	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+	return err;
 }
 
 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 {
-	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
-	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+	struct notifier_block *nb;
 
+	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
+
+	nb = &mlxsw_sp_switchdev_blocking_notifier;
+	unregister_switchdev_blocking_notifier(nb);
+
+	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
 }
 
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3238b9e..7f8da88 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1337,8 +1337,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
 static const struct switchdev_ops ocelot_port_switchdev_ops = {
 	.switchdev_port_attr_get	= ocelot_port_attr_get,
 	.switchdev_port_attr_set	= ocelot_port_attr_set,
-	.switchdev_port_obj_add		= ocelot_port_obj_add,
-	.switchdev_port_obj_del		= ocelot_port_obj_del,
 };
 
 static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
@@ -1595,6 +1593,34 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = {
 };
 EXPORT_SYMBOL(ocelot_netdevice_nb);
 
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+					   unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+	int err;
+
+	switch (event) {
+		/* Blocking events. */
+	case SWITCHDEV_PORT_OBJ_ADD:
+		err = switchdev_handle_port_obj_add(dev, ptr,
+						    ocelot_netdevice_dev_check,
+						    ocelot_port_obj_add);
+		return notifier_from_errno(err);
+	case SWITCHDEV_PORT_OBJ_DEL:
+		err = switchdev_handle_port_obj_del(dev, ptr,
+						    ocelot_netdevice_dev_check,
+						    ocelot_port_obj_del);
+		return notifier_from_errno(err);
+	}
+
+	return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+	.notifier_call = ocelot_switchdev_blocking_event,
+};
+EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+
 int ocelot_probe_port(struct ocelot *ocelot, u8 port,
 		      void __iomem *regs,
 		      struct phy_device *phy)
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 62c7c8e..086775f 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -499,5 +499,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
 		      struct phy_device *phy);
 
 extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_blocking_nb;
 
 #endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 4c23d18..ca3ea2f 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -12,6 +12,7 @@
 #include <linux/of_platform.h>
 #include <linux/mfd/syscon.h>
 #include <linux/skbuff.h>
+#include <net/switchdev.h>
 
 #include "ocelot.h"
 
@@ -328,6 +329,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
 	}
 
 	register_netdevice_notifier(&ocelot_netdevice_nb);
+	register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
 
 	dev_info(&pdev->dev, "Ocelot switch probed\n");
 
@@ -342,6 +344,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
 	struct ocelot *ocelot = platform_get_drvdata(pdev);
 
 	ocelot_deinit(ocelot);
+	unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
 	unregister_netdevice_notifier(&ocelot_netdevice_nb);
 
 	return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4afb103..47c708f 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -56,7 +56,9 @@
 
 ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
 nfp-objs += \
+	    abm/cls.o \
 	    abm/ctrl.o \
+	    abm/qdisc.o \
 	    abm/main.o
 endif
 
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
new file mode 100644
index 0000000..9852080
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_net_repr.h"
+#include "main.h"
+
+struct nfp_abm_u32_match {
+	u32 handle;
+	u32 band;
+	u8 mask;
+	u8 val;
+	struct list_head list;
+};
+
+static bool
+nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
+			__be16 proto, struct netlink_ext_ack *extack)
+{
+	struct tc_u32_key *k;
+	unsigned int tos_off;
+
+	if (knode->exts && tcf_exts_has_actions(knode->exts)) {
+		NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
+		return false;
+	}
+	if (knode->link_handle) {
+		NL_SET_ERR_MSG_MOD(extack, "linking not supported");
+		return false;
+	}
+	if (knode->sel->flags != TC_U32_TERMINAL) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "flags must be equal to TC_U32_TERMINAL");
+		return false;
+	}
+	if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
+	    knode->sel->offoff || knode->fshift) {
+		NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+		return false;
+	}
+	if (knode->sel->hoff || knode->sel->hmask) {
+		NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
+		return false;
+	}
+	if (knode->val || knode->mask) {
+		NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
+		return false;
+	}
+	if (knode->res && knode->res->class) {
+		NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
+		return false;
+	}
+	if (knode->res && knode->res->classid >= abm->num_bands) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "classid higher than number of bands");
+		return false;
+	}
+	if (knode->sel->nkeys != 1) {
+		NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
+		return false;
+	}
+
+	switch (proto) {
+	case htons(ETH_P_IP):
+		tos_off = 16;
+		break;
+	case htons(ETH_P_IPV6):
+		tos_off = 20;
+		break;
+	default:
+		NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
+		return false;
+	}
+
+	k = &knode->sel->keys[0];
+	if (k->offmask) {
+		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+		return false;
+	}
+	if (k->off) {
+		NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
+		return false;
+	}
+	if (k->val & ~k->mask) {
+		NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
+		return false;
+	}
+	if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
+		NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
+		nfp_err(abm->app->cpp,
+			"u32 offload: requested mask %x FW can support only %x\n",
+			be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
+		return false;
+	}
+
+	return true;
+}
+
+/* This filter list -> map conversion is O(n * m); we expect a single- or
+ * low-double-digit number of prios, and likewise for the filters.  Also, u32
+ * doesn't report stats, so this is really only a setup-time cost.
+ */
+static unsigned int
+nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
+{
+	struct nfp_abm_u32_match *iter;
+
+	list_for_each_entry(iter, &alink->dscp_map, list)
+		if ((prio & iter->mask) == iter->val)
+			return iter->band;
+
+	return alink->def_band;
+}
+
+static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
+{
+	unsigned int i, bits_per_prio, prios_per_word, base_shift;
+	struct nfp_abm *abm = alink->abm;
+	u32 field_mask;
+
+	alink->has_prio = !list_empty(&alink->dscp_map);
+
+	bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
+	field_mask = (1 << bits_per_prio) - 1;
+	prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;
+
+	/* FW mask applies from top bits */
+	base_shift = 8 - order_base_2(abm->num_prios);
+
+	for (i = 0; i < abm->num_prios; i++) {
+		unsigned int offset;
+		u32 *word;
+		u8 band;
+
+		word = &alink->prio_map[i / prios_per_word];
+		offset = (i % prios_per_word) * bits_per_prio;
+
+		band = nfp_abm_find_band_for_prio(alink, i << base_shift);
+
+		*word &= ~(field_mask << offset);
+		*word |= band << offset;
+	}
+
+	/* Qdisc offload status may change if has_prio changed */
+	nfp_abm_qdisc_offload_update(alink);
+
+	return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+}
+
+static void
+nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
+			 struct tc_cls_u32_knode *knode)
+{
+	struct nfp_abm_u32_match *iter;
+
+	list_for_each_entry(iter, &alink->dscp_map, list)
+		if (iter->handle == knode->handle) {
+			list_del(&iter->list);
+			kfree(iter);
+			nfp_abm_update_band_map(alink);
+			return;
+		}
+}
+
+static int
+nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+			  struct tc_cls_u32_knode *knode,
+			  __be16 proto, struct netlink_ext_ack *extack)
+{
+	struct nfp_abm_u32_match *match = NULL, *iter;
+	unsigned int tos_off;
+	u8 mask, val;
+	int err;
+
+	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+		goto err_delete;
+
+	tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+
+	/* Extract the DSCP Class Selector bits */
+	val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
+	mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
+
+	/* Check if there is no conflicting mapping and find match by handle */
+	list_for_each_entry(iter, &alink->dscp_map, list) {
+		u32 cmask;
+
+		if (iter->handle == knode->handle) {
+			match = iter;
+			continue;
+		}
+
+		cmask = iter->mask & mask;
+		if ((iter->val & cmask) == (val & cmask) &&
+		    iter->band != knode->res->classid) {
+			NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+			goto err_delete;
+		}
+	}
+
+	if (!match) {
+		match = kzalloc(sizeof(*match), GFP_KERNEL);
+		if (!match)
+			return -ENOMEM;
+		list_add(&match->list, &alink->dscp_map);
+	}
+	match->handle = knode->handle;
+	match->band = knode->res->classid;
+	match->mask = mask;
+	match->val = val;
+
+	err = nfp_abm_update_band_map(alink);
+	if (err)
+		goto err_delete;
+
+	return 0;
+
+err_delete:
+	nfp_abm_u32_knode_delete(alink, knode);
+	return -EOPNOTSUPP;
+}
+
+static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+				     void *type_data, void *cb_priv)
+{
+	struct tc_cls_u32_offload *cls_u32 = type_data;
+	struct nfp_repr *repr = cb_priv;
+	struct nfp_abm_link *alink;
+
+	alink = repr->app_priv;
+
+	if (type != TC_SETUP_CLSU32) {
+		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+				   "only offload of u32 classifier supported");
+		return -EOPNOTSUPP;
+	}
+	if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
+		return -EOPNOTSUPP;
+
+	if (cls_u32->common.protocol != htons(ETH_P_IP) &&
+	    cls_u32->common.protocol != htons(ETH_P_IPV6)) {
+		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+				   "only IP and IPv6 supported as filter protocol");
+		return -EOPNOTSUPP;
+	}
+
+	switch (cls_u32->command) {
+	case TC_CLSU32_NEW_KNODE:
+	case TC_CLSU32_REPLACE_KNODE:
+		return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
+						 cls_u32->common.protocol,
+						 cls_u32->common.extack);
+	case TC_CLSU32_DELETE_KNODE:
+		nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+			    struct tc_block_offload *f)
+{
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		return tcf_block_cb_register(f->block,
+					     nfp_abm_setup_tc_block_cb,
+					     repr, repr, f->extack);
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
+					repr);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
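A standalone sketch (not driver code) of the priority -> band packing done by
nfp_abm_update_band_map() above: every priority gets a bits_per_prio-wide
field inside an array of 32-bit words. The band chosen per priority is a
stand-in here; the driver derives it from the offloaded u32 filters and also
shifts the priority index up so that the firmware mask applies from the top
bits.

#include <stdint.h>
#include <stdio.h>

static unsigned int order_base_2(unsigned int n)	/* ceil(log2(n)), 0 for n == 1 */
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int num_bands = 4, num_prios = 64;	/* example capabilities */
	unsigned int bits_per_prio = roundup_pow_of_two(order_base_2(num_bands));
	unsigned int prios_per_word = 32 / bits_per_prio;
	uint32_t field_mask = (1u << bits_per_prio) - 1;
	uint32_t prio_map[4] = { 0 };	/* 64 prios * 2 bits = 4 words */
	unsigned int i;

	for (i = 0; i < num_prios; i++) {
		unsigned int offset = (i % prios_per_word) * bits_per_prio;
		uint32_t *word = &prio_map[i / prios_per_word];
		uint32_t band = i % num_bands;	/* stand-in band lookup */

		*word &= ~(field_mask << offset);
		*word |= band << offset;
	}

	printf("bits_per_prio=%u prios_per_word=%u word0=0x%08x\n",
	       bits_per_prio, prios_per_word, (unsigned int)prio_map[0]);
	return 0;
}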
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f4..ad6c2a6 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,7 +1,9 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (C) 2018 Netronome Systems, Inc. */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <linux/log2.h>
 
 #include "../nfpcore/nfp_cpp.h"
 #include "../nfpcore/nfp_nffw.h"
@@ -11,38 +13,56 @@
 #include "../nfp_net.h"
 #include "main.h"
 
-#define NFP_QLVL_SYM_NAME	"_abi_nfd_out_q_lvls_%u"
+#define NFP_NUM_PRIOS_SYM_NAME	"_abi_pci_dscp_num_prio_%u"
+#define NFP_NUM_BANDS_SYM_NAME	"_abi_pci_dscp_num_band_%u"
+#define NFP_ACT_MASK_SYM_NAME	"_abi_nfd_out_q_actions_%u"
+
+#define NFP_QLVL_SYM_NAME	"_abi_nfd_out_q_lvls_%u%s"
 #define NFP_QLVL_STRIDE		16
 #define NFP_QLVL_BLOG_BYTES	0
 #define NFP_QLVL_BLOG_PKTS	4
 #define NFP_QLVL_THRS		8
+#define NFP_QLVL_ACT		12
 
-#define NFP_QMSTAT_SYM_NAME	"_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_SYM_NAME	"_abi_nfdqm%u_stats%s"
 #define NFP_QMSTAT_STRIDE	32
 #define NFP_QMSTAT_NON_STO	0
 #define NFP_QMSTAT_STO		8
 #define NFP_QMSTAT_DROP		16
 #define NFP_QMSTAT_ECN		24
 
+#define NFP_Q_STAT_SYM_NAME	"_abi_nfd_rxq_stats%u%s"
+#define NFP_Q_STAT_STRIDE	16
+#define NFP_Q_STAT_PKTS		0
+#define NFP_Q_STAT_BYTES	8
+
+#define NFP_NET_ABM_MBOX_CMD		NFP_NET_CFG_MBOX_SIMPLE_CMD
+#define NFP_NET_ABM_MBOX_RET		NFP_NET_CFG_MBOX_SIMPLE_RET
+#define NFP_NET_ABM_MBOX_DATALEN	NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_ABM_MBOX_RESERVED	(NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
+#define NFP_NET_ABM_MBOX_DATA		(NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
+
 static int
 nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
-		  unsigned int stride, unsigned int offset, unsigned int i,
-		  bool is_u64, u64 *res)
+		  unsigned int stride, unsigned int offset, unsigned int band,
+		  unsigned int queue, bool is_u64, u64 *res)
 {
 	struct nfp_cpp *cpp = alink->abm->app->cpp;
 	u64 val, sym_offset;
+	unsigned int qid;
 	u32 val32;
 	int err;
 
-	sym_offset = (alink->queue_base + i) * stride + offset;
+	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+	sym_offset = qid * stride + offset;
 	if (is_u64)
 		err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
 	else
 		err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
 	if (err) {
-		nfp_err(cpp,
-			"RED offload reading stat failed on vNIC %d queue %d\n",
-			alink->id, i);
+		nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
+			alink->id, band, queue, alink->queue_base);
 		return err;
 	}
 
@@ -50,175 +70,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
 	return 0;
 }
 
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
-		      unsigned int stride, unsigned int offset, bool is_u64,
-		      u64 *res)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
 {
-	u64 val, sum = 0;
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < alink->vnic->max_rx_rings; i++) {
-		err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
-					is_u64, &val);
-		if (err)
-			return err;
-		sum += val;
-	}
-
-	*res = sum;
-	return 0;
-}
-
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
-{
-	struct nfp_cpp *cpp = alink->abm->app->cpp;
+	struct nfp_cpp *cpp = abm->app->cpp;
 	u64 sym_offset;
 	int err;
 
-	sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
-	err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
-				 sym_offset, val);
+	__clear_bit(id, abm->threshold_undef);
+	if (abm->thresholds[id] == val)
+		return 0;
+
+	sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+	err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
 	if (err) {
-		nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
-			alink->id, i);
+		nfp_err(cpp,
+			"RED offload setting level failed on subqueue %d\n",
+			id);
 		return err;
 	}
 
+	abm->thresholds[id] = val;
 	return 0;
 }
 
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+			   unsigned int queue, u32 val)
 {
-	int i, err;
+	unsigned int threshold;
 
-	for (i = 0; i < alink->vnic->max_rx_rings; i++) {
-		err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
-		if (err)
-			return err;
+	threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+	return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
+}
+
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+			     enum nfp_abm_q_action act)
+{
+	struct nfp_cpp *cpp = abm->app->cpp;
+	u64 sym_offset;
+	int err;
+
+	if (abm->actions[id] == act)
+		return 0;
+
+	sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
+	err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
+	if (err) {
+		nfp_err(cpp,
+			"RED offload setting action failed on subqueue %d\n",
+			id);
+		return err;
 	}
 
+	abm->actions[id] = act;
 	return 0;
 }
 
-u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+			   unsigned int queue, enum nfp_abm_q_action act)
 {
-	u64 val;
+	unsigned int qid;
 
-	if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
-			      NFP_QMSTAT_NON_STO, i, true, &val))
-		return 0;
-	return val;
+	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+	return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
 }
 
-u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
 {
-	u64 val;
+	unsigned int band;
+	u64 val, sum = 0;
 
-	if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
-			      NFP_QMSTAT_STO, i, true, &val))
-		return 0;
-	return val;
+	for (band = 0; band < alink->abm->num_bands; band++) {
+		if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				      NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
+				      band, queue, true, &val))
+			return 0;
+		sum += val;
+	}
+
+	return sum;
 }
 
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
-			      struct nfp_alink_stats *stats)
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
+{
+	unsigned int band;
+	u64 val, sum = 0;
+
+	for (band = 0; band < alink->abm->num_bands; band++) {
+		if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+				      NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
+				      band, queue, true, &val))
+			return 0;
+		sum += val;
+	}
+
+	return sum;
+}
+
+static int
+nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
+			unsigned int queue, unsigned int off, u64 *val)
+{
+	if (!nfp_abm_has_prio(alink->abm)) {
+		if (!band) {
+			unsigned int id = alink->queue_base + queue;
+
+			*val = nn_readq(alink->vnic,
+					NFP_NET_CFG_RXR_STATS(id) + off);
+		} else {
+			*val = 0;
+		}
+
+		return 0;
+	} else {
+		return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
+					 NFP_Q_STAT_STRIDE, off, band, queue,
+					 true, val);
+	}
+}
+
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
+			      unsigned int queue, struct nfp_alink_stats *stats)
 {
 	int err;
 
-	stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
-	stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
+				      &stats->tx_pkts);
+	if (err)
+		return err;
 
-	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
-				NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
-				i, false, &stats->backlog_bytes);
+	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
+				      &stats->tx_bytes);
+	if (err)
+		return err;
+
+	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
+				NFP_QLVL_BLOG_BYTES, band, queue, false,
+				&stats->backlog_bytes);
 	if (err)
 		return err;
 
 	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
 				NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
-				i, false, &stats->backlog_pkts);
+				band, queue, false, &stats->backlog_pkts);
 	if (err)
 		return err;
 
 	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
 				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
-				i, true, &stats->drops);
+				band, queue, true, &stats->drops);
 	if (err)
 		return err;
 
 	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
 				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
-				 i, true, &stats->overlimits);
+				 band, queue, true, &stats->overlimits);
 }
 
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
-			    struct nfp_alink_stats *stats)
-{
-	u64 pkts = 0, bytes = 0;
-	int i, err;
-
-	for (i = 0; i < alink->vnic->max_rx_rings; i++) {
-		pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
-		bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
-	}
-	stats->tx_pkts = pkts;
-	stats->tx_bytes = bytes;
-
-	err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
-				    NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
-				    false, &stats->backlog_bytes);
-	if (err)
-		return err;
-
-	err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
-				    NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
-				    false, &stats->backlog_pkts);
-	if (err)
-		return err;
-
-	err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
-				    NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
-				    true, &stats->drops);
-	if (err)
-		return err;
-
-	return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
-				     NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
-				     true, &stats->overlimits);
-}
-
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+			       unsigned int band, unsigned int queue,
 			       struct nfp_alink_xstats *xstats)
 {
 	int err;
 
 	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
 				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
-				i, true, &xstats->pdrop);
+				band, queue, true, &xstats->pdrop);
 	if (err)
 		return err;
 
 	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
 				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
-				 i, true, &xstats->ecn_marked);
-}
-
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
-			     struct nfp_alink_xstats *xstats)
-{
-	int err;
-
-	err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
-				    NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
-				    true, &xstats->pdrop);
-	if (err)
-		return err;
-
-	return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
-				     NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
-				     true, &xstats->ecn_marked);
+				 band, queue, true, &xstats->ecn_marked);
 }
 
 int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@@ -233,10 +257,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
 			    NULL, 0, NULL, 0);
 }
 
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
+{
+	struct nfp_net *nn = alink->vnic;
+	unsigned int i;
+	int err;
+
+	/* Write data_len and wipe reserved */
+	nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
+		  alink->abm->prio_map_len);
+
+	for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
+		nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
+			  packed[i / sizeof(u32)]);
+
+	err = nfp_net_reconfig_mbox(nn,
+				    NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+	if (err)
+		nfp_err(alink->abm->app->cpp,
+			"setting DSCP -> VQ map failed with error %d\n", err);
+	return err;
+}
+
+static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
+{
+	struct nfp_abm *abm = alink->abm;
+	struct nfp_net *nn = alink->vnic;
+	unsigned int min_mbox_sz;
+
+	if (!nfp_abm_has_prio(alink->abm))
+		return 0;
+
+	min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
+	if (nn->tlv_caps.mbox_len < min_mbox_sz) {
+		nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
+			nn->tlv_caps.mbox_len, min_mbox_sz);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
 {
 	alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
 	alink->queue_base /= alink->vnic->stride_rx;
+
+	return nfp_abm_ctrl_prio_check_params(alink);
+}
+
+static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
+{
+	unsigned int size;
+
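+	/* Worked example (hypothetical values): with 4 bands and 64
+	 * priorities, order_base_2(4) == 2 bits are needed per priority,
+	 * giving DIV_ROUND_UP(2 * 64, 8) == 16 bytes, already a multiple
+	 * of sizeof(u32).
+	 */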
+	size = roundup_pow_of_two(order_base_2(abm->num_bands));
+	size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
+	size = round_up(size, sizeof(u32));
+
+	return size;
 }
 
 static const struct nfp_rtsym *
@@ -260,33 +338,77 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
 }
 
 static const struct nfp_rtsym *
-nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
-			  unsigned int size)
+nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
+			  size_t size)
 {
-	return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+	char pf_symbol[64];
+
+	size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
+	snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
+		 abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
+
+	return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
 }
 
 int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
 {
 	struct nfp_pf *pf = abm->app->pf;
 	const struct nfp_rtsym *sym;
-	unsigned int pf_id;
-	char pf_symbol[64];
+	int res;
 
-	pf_id =	nfp_cppcore_pcie_unit(pf->cpp);
-	abm->pf_id = pf_id;
+	abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
 
-	snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
-	sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+	/* Read count of prios and prio bands */
+	res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
+	if (res < 0)
+		return res;
+	abm->num_bands = res;
+
+	res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
+	if (res < 0)
+		return res;
+	abm->num_prios = res;
+
+	/* Read available actions */
+	res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
+					 BIT(NFP_ABM_ACT_MARK_DROP));
+	if (res < 0)
+		return res;
+	abm->action_mask = res;
+
+	abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
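+	/* e.g. (hypothetical) num_prios == 4 gives order_base_2() == 2 and
+	 * dscp_mask == GENMASK(7, 6) == 0xc0, i.e. only the two most
+	 * significant DSCP bits select a priority band.
+	 */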
+	abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
+
+	/* Check values are sane, U16_MAX is arbitrarily chosen as max */
+	if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
+	    abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
+	    (abm->num_bands == 1) != (abm->num_prios == 1)) {
+		nfp_err(pf->cpp,
+			"invalid priomap description num bands: %u and num prios: %u\n",
+			abm->num_bands, abm->num_prios);
+		return -EINVAL;
+	}
+
+	/* Find level and stat symbols */
+	sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
+					NFP_QLVL_STRIDE);
 	if (IS_ERR(sym))
 		return PTR_ERR(sym);
 	abm->q_lvls = sym;
 
-	snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
-	sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+	sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
+					NFP_QMSTAT_STRIDE);
 	if (IS_ERR(sym))
 		return PTR_ERR(sym);
 	abm->qm_stats = sym;
 
+	if (nfp_abm_has_prio(abm)) {
+		sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
+						NFP_Q_STAT_STRIDE);
+		if (IS_ERR(sym))
+			return PTR_ERR(sym);
+		abm->q_stats = sym;
+	}
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0..7a4d55f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
 /* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
+#include <linux/bitmap.h>
 #include <linux/etherdevice.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
 #include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
 
 #include "../nfpcore/nfp.h"
 #include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
 }
 
 static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
-		     u32 handle, unsigned int qs, u32 init_val)
-{
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
-	int ret;
-
-	ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
-	memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
-	alink->parent = handle;
-	alink->num_qdiscs = qs;
-	port->tc_offload_cnt = qs;
-
-	return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
-		   u32 handle, unsigned int qs)
-{
-	__nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
-	unsigned int i = TC_H_MIN(opt->parent) - 1;
-
-	if (opt->parent == TC_H_ROOT)
-		i = 0;
-	else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
-		i = TC_H_MIN(opt->parent) - 1;
-	else
-		return -EOPNOTSUPP;
-
-	if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
-		return -EOPNOTSUPP;
-
-	return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
-		    u32 handle)
-{
-	unsigned int i;
-
-	for (i = 0; i < alink->num_qdiscs; i++)
-		if (handle == alink->qdiscs[i].handle)
-			break;
-	if (i == alink->num_qdiscs)
-		return;
-
-	if (alink->parent == TC_H_ROOT) {
-		nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
-	} else {
-		nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
-		memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
-	}
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
-		    struct tc_red_qopt_offload *opt)
-{
-	bool existing;
-	int i, err;
-
-	i = nfp_abm_red_find(alink, opt);
-	existing = i >= 0;
-
-	if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
-		nfp_warn(alink->abm->app->cpp,
-			 "RED offload failed - unsupported parameters\n");
-		err = -EINVAL;
-		goto err_destroy;
-	}
-
-	if (existing) {
-		if (alink->parent == TC_H_ROOT)
-			err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
-		else
-			err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
-		if (err)
-			goto err_destroy;
-		return 0;
-	}
-
-	if (opt->parent == TC_H_ROOT) {
-		i = 0;
-		err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
-					   opt->set.min);
-	} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
-		i = TC_H_MIN(opt->parent) - 1;
-		err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
-	} else {
-		return -EINVAL;
-	}
-	/* Set the handle to try full clean up, in case IO failed */
-	alink->qdiscs[i].handle = opt->handle;
-	if (err)
-		goto err_destroy;
-
-	if (opt->parent == TC_H_ROOT)
-		err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
-	else
-		err = nfp_abm_ctrl_read_q_stats(alink, i,
-						&alink->qdiscs[i].stats);
-	if (err)
-		goto err_destroy;
-
-	if (opt->parent == TC_H_ROOT)
-		err = nfp_abm_ctrl_read_xstats(alink,
-					       &alink->qdiscs[i].xstats);
-	else
-		err = nfp_abm_ctrl_read_q_xstats(alink, i,
-						 &alink->qdiscs[i].xstats);
-	if (err)
-		goto err_destroy;
-
-	alink->qdiscs[i].stats.backlog_pkts = 0;
-	alink->qdiscs[i].stats.backlog_bytes = 0;
-
-	return 0;
-err_destroy:
-	/* If the qdisc keeps on living, but we can't offload undo changes */
-	if (existing) {
-		opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
-		opt->set.qstats->backlog -=
-			alink->qdiscs[i].stats.backlog_bytes;
-	}
-	nfp_abm_red_destroy(netdev, alink, opt->handle);
-
-	return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
-		     struct tc_qopt_offload_stats *stats)
-{
-	_bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
-		       new->tx_pkts - old->tx_pkts);
-	stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
-	stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
-	stats->qstats->overlimits += new->overlimits - old->overlimits;
-	stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
-	struct nfp_alink_stats *prev_stats;
-	struct nfp_alink_stats stats;
-	int i, err;
-
-	i = nfp_abm_red_find(alink, opt);
-	if (i < 0)
-		return i;
-	prev_stats = &alink->qdiscs[i].stats;
-
-	if (alink->parent == TC_H_ROOT)
-		err = nfp_abm_ctrl_read_stats(alink, &stats);
-	else
-		err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
-	if (err)
-		return err;
-
-	nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
-	*prev_stats = stats;
-
-	return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
-	struct nfp_alink_xstats *prev_xstats;
-	struct nfp_alink_xstats xstats;
-	int i, err;
-
-	i = nfp_abm_red_find(alink, opt);
-	if (i < 0)
-		return i;
-	prev_xstats = &alink->qdiscs[i].xstats;
-
-	if (alink->parent == TC_H_ROOT)
-		err = nfp_abm_ctrl_read_xstats(alink, &xstats);
-	else
-		err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
-	if (err)
-		return err;
-
-	opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
-	opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
-	*prev_xstats = xstats;
-
-	return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
-		     struct tc_red_qopt_offload *opt)
-{
-	switch (opt->command) {
-	case TC_RED_REPLACE:
-		return nfp_abm_red_replace(netdev, alink, opt);
-	case TC_RED_DESTROY:
-		nfp_abm_red_destroy(netdev, alink, opt->handle);
-		return 0;
-	case TC_RED_STATS:
-		return nfp_abm_red_stats(alink, opt);
-	case TC_RED_XSTATS:
-		return nfp_abm_red_xstats(alink, opt);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
-	struct nfp_alink_stats stats;
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < alink->num_qdiscs; i++) {
-		if (alink->qdiscs[i].handle == TC_H_UNSPEC)
-			continue;
-
-		err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
-		if (err)
-			return err;
-
-		nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
-				     &opt->stats);
-	}
-
-	return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
-		    struct tc_mq_qopt_offload *opt)
-{
-	switch (opt->command) {
-	case TC_MQ_CREATE:
-		nfp_abm_reset_root(netdev, alink, opt->handle,
-				   alink->total_queues);
-		return 0;
-	case TC_MQ_DESTROY:
-		if (opt->handle == alink->parent)
-			nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
-		return 0;
-	case TC_MQ_STATS:
-		return nfp_abm_mq_stats(alink, opt);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int
 nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		 enum tc_setup_type type, void *type_data)
 {
@@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 
 	switch (type) {
+	case TC_SETUP_ROOT_QDISC:
+		return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
 	case TC_SETUP_QDISC_MQ:
 		return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
 	case TC_SETUP_QDISC_RED:
 		return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+	case TC_SETUP_QDISC_GRED:
+		return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
+	case TC_SETUP_BLOCK:
+		return nfp_abm_setup_cls_block(netdev, repr, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -573,31 +315,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
 	alink->abm = abm;
 	alink->vnic = nn;
 	alink->id = id;
-	alink->parent = TC_H_ROOT;
 	alink->total_queues = alink->vnic->max_rx_rings;
-	alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
-				 GFP_KERNEL);
-	if (!alink->qdiscs) {
-		err = -ENOMEM;
+
+	INIT_LIST_HEAD(&alink->dscp_map);
+
+	err = nfp_abm_ctrl_read_params(alink);
+	if (err)
 		goto err_free_alink;
-	}
+
+	alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
+	if (!alink->prio_map)
+		goto err_free_alink;
 
 	/* This is a multi-host app, make sure MAC/PHY is up, but don't
 	 * make the MAC/PHY state follow the state of any of the ports.
 	 */
 	err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
 	if (err < 0)
-		goto err_free_qdiscs;
+		goto err_free_priomap;
 
 	netif_keep_dst(nn->dp.netdev);
 
 	nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
-	nfp_abm_ctrl_read_params(alink);
+	INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
 
 	return 0;
 
-err_free_qdiscs:
-	kvfree(alink->qdiscs);
+err_free_priomap:
+	kfree(alink->prio_map);
 err_free_alink:
 	kfree(alink);
 	return err;
@@ -608,10 +353,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 	struct nfp_abm_link *alink = nn->app_priv;
 
 	nfp_abm_kill_reprs(alink->abm, alink);
-	kvfree(alink->qdiscs);
+	WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
+	kfree(alink->prio_map);
 	kfree(alink);
 }
 
+static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+	struct nfp_abm_link *alink = nn->app_priv;
+
+	if (nfp_abm_has_prio(alink->abm))
+		return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+	return 0;
+}
+
 static u64 *
 nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
 {
@@ -664,6 +419,7 @@ static int nfp_abm_init(struct nfp_app *app)
 	struct nfp_pf *pf = app->pf;
 	struct nfp_reprs *reprs;
 	struct nfp_abm *abm;
+	unsigned int i;
 	int err;
 
 	if (!pf->eth_tbl) {
@@ -690,15 +446,35 @@ static int nfp_abm_init(struct nfp_app *app)
 	if (err)
 		goto err_free_abm;
 
+	err = -ENOMEM;
+	abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
+	abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+	if (!abm->threshold_undef)
+		goto err_free_abm;
+
+	abm->thresholds = kvcalloc(abm->num_thresholds,
+				   sizeof(*abm->thresholds), GFP_KERNEL);
+	if (!abm->thresholds)
+		goto err_free_thresh_umap;
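+	/* Threshold/action entries are laid out band-major, i.e.
+	 * id = band * NFP_NET_MAX_RX_RINGS + queue; start every level at
+	 * NFP_ABM_LVL_INFINITY so thresholds are effectively never exceeded.
+	 */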
+	for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
+		__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+	abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
+				GFP_KERNEL);
+	if (!abm->actions)
+		goto err_free_thresh;
+	for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
+		__nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
+
 	/* We start in legacy mode, make sure advanced queuing is disabled */
 	err = nfp_abm_ctrl_qm_disable(abm);
 	if (err)
-		goto err_free_abm;
+		goto err_free_act;
 
 	err = -ENOMEM;
 	reprs = nfp_reprs_alloc(pf->max_data_vnics);
 	if (!reprs)
-		goto err_free_abm;
+		goto err_free_act;
 	RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
 
 	reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +486,12 @@ static int nfp_abm_init(struct nfp_app *app)
 
 err_free_phys:
 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_act:
+	kvfree(abm->actions);
+err_free_thresh:
+	kvfree(abm->thresholds);
+err_free_thresh_umap:
+	bitmap_free(abm->threshold_undef);
 err_free_abm:
 	kfree(abm);
 	app->priv = NULL;
@@ -723,6 +505,9 @@ static void nfp_abm_clean(struct nfp_app *app)
 	nfp_abm_eswitch_clean_up(abm);
 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
 	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+	bitmap_free(abm->threshold_undef);
+	kvfree(abm->actions);
+	kvfree(abm->thresholds);
 	kfree(abm);
 	app->priv = NULL;
 }
@@ -736,6 +521,7 @@ const struct nfp_app_type app_abm = {
 
 	.vnic_alloc	= nfp_abm_vnic_alloc,
 	.vnic_free	= nfp_abm_vnic_free,
+	.vnic_init	= nfp_abm_vnic_init,
 
 	.port_get_stats		= nfp_abm_port_get_stats,
 	.port_get_stats_count	= nfp_abm_port_get_stats_count,
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d9..4dcf588 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,19 @@
 #ifndef __NFP_ABM_H__
 #define __NFP_ABM_H__ 1
 
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
 #include <net/devlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL	(2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY		S32_MAX
 
 struct nfp_app;
 struct nfp_net;
@@ -12,21 +24,62 @@ struct nfp_net;
 #define NFP_ABM_PORTID_TYPE	GENMASK(23, 16)
 #define NFP_ABM_PORTID_ID	GENMASK(7, 0)
 
+/* The possible actions if thresholds are exceeded */
+enum nfp_abm_q_action {
+	/* mark if ECN capable, otherwise drop */
+	NFP_ABM_ACT_MARK_DROP		= 0,
+	/* mark if ECN capable, otherwise goto QM */
+	NFP_ABM_ACT_MARK_QUEUE		= 1,
+	NFP_ABM_ACT_DROP		= 2,
+	NFP_ABM_ACT_QUEUE		= 3,
+	NFP_ABM_ACT_NOQUEUE		= 4,
+};
+
 /**
  * struct nfp_abm - ABM NIC app structure
  * @app:	back pointer to nfp_app
  * @pf_id:	ID of our PF link
+ *
+ * @num_prios:	number of supported DSCP priorities
+ * @num_bands:	number of supported DSCP priority bands
+ * @action_mask:	bitmask of supported actions
+ *
+ * @thresholds:		current threshold configuration
+ * @threshold_undef:	bitmap of thresholds which have not been set
+ * @actions:		current FW action configuration
+ * @num_thresholds:	number of @thresholds and bits in @threshold_undef
+ *
+ * @prio_map_len:	computed length of FW priority map (in bytes)
+ * @dscp_mask:		mask FW will apply on DSCP field
+ *
  * @eswitch_mode:	devlink eswitch mode, advanced functions only visible
  *			in switchdev mode
+ *
  * @q_lvls:	queue level control area
  * @qm_stats:	queue statistics symbol
+ * @q_stats:	basic queue statistics (only in per-band case)
  */
 struct nfp_abm {
 	struct nfp_app *app;
 	unsigned int pf_id;
+
+	unsigned int num_prios;
+	unsigned int num_bands;
+	unsigned int action_mask;
+
+	u32 *thresholds;
+	unsigned long *threshold_undef;
+	u8 *actions;
+	size_t num_thresholds;
+
+	unsigned int prio_map_len;
+	u8 dscp_mask;
+
 	enum devlink_eswitch_mode eswitch_mode;
+
 	const struct nfp_rtsym *q_lvls;
 	const struct nfp_rtsym *qm_stats;
+	const struct nfp_rtsym *q_stats;
 };
 
 /**
@@ -57,16 +110,76 @@ struct nfp_alink_xstats {
 	u64 pdrop;
 };
 
+enum nfp_qdisc_type {
+	NFP_QDISC_NONE = 0,
+	NFP_QDISC_MQ,
+	NFP_QDISC_RED,
+	NFP_QDISC_GRED,
+};
+
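+/* Sentinel for a child Qdisc the kernel knows about but the driver does
+ * not track (e.g. a graft of a handle we have never seen).
+ */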
+#define NFP_QDISC_UNTRACKED	((struct nfp_qdisc *)1UL)
+
 /**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle:	handle of currently offloaded RED Qdisc
- * @stats:	statistics from last refresh
- * @xstats:	base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev:		netdev on which Qdisc was created
+ * @type:		Qdisc type
+ * @handle:		handle of this Qdisc
+ * @parent_handle:	handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt:		number of attachment points in the hierarchy
+ * @num_children:	current size of the @children array
+ * @children:		pointers to children
+ *
+ * @params_ok:		parameters of this Qdisc are OK for offload
+ * @offload_mark:	offload refresh state - selected for offload
+ * @offloaded:		Qdisc is currently offloaded to the HW
+ *
+ * @mq:			MQ Qdisc specific parameters and state
+ * @mq.stats:		current stats of the MQ Qdisc
+ * @mq.prev_stats:	previously reported @mq.stats
+ *
+ * @red:		RED Qdisc specific parameters and state
+ * @red.num_bands:	Number of valid entries in the @red.band table
+ * @red.band:		Per-band array of RED instances
+ * @red.band.ecn:		ECN marking is enabled (rather than drop)
+ * @red.band.threshold:		ECN marking threshold
+ * @red.band.stats:		current stats of the RED Qdisc
+ * @red.band.prev_stats:	previously reported @red.band.stats
+ * @red.band.xstats:		extended stats for RED - current
+ * @red.band.prev_xstats:	extended stats for RED - previously reported
  */
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+	struct net_device *netdev;
+	enum nfp_qdisc_type type;
 	u32 handle;
-	struct nfp_alink_stats stats;
-	struct nfp_alink_xstats xstats;
+	u32 parent_handle;
+	unsigned int use_cnt;
+	unsigned int num_children;
+	struct nfp_qdisc **children;
+
+	bool params_ok;
+	bool offload_mark;
+	bool offloaded;
+
+	union {
+		/* NFP_QDISC_MQ */
+		struct {
+			struct nfp_alink_stats stats;
+			struct nfp_alink_stats prev_stats;
+		} mq;
+		/* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
+		struct {
+			unsigned int num_bands;
+
+			struct {
+				bool ecn;
+				u32 threshold;
+				struct nfp_alink_stats stats;
+				struct nfp_alink_stats prev_stats;
+				struct nfp_alink_xstats xstats;
+				struct nfp_alink_xstats prev_xstats;
+			} band[MAX_DPs];
+		} red;
+	};
 };
 
 /**
@@ -76,9 +189,17 @@ struct nfp_red_qdisc {
  * @id:		id of the data vNIC
  * @queue_base:	id of base to host queue within PCIe (not QC idx)
  * @total_queues:	number of PF queues
- * @parent:	handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs:	number of currently used qdiscs
- * @qdiscs:	array of qdiscs
+ *
+ * @last_stats_update:	ktime of last stats update
+ *
+ * @prio_map:		current map of priorities
+ * @has_prio:		@prio_map is valid
+ *
+ * @def_band:		default band to use
+ * @dscp_map:		list of DSCP to band mappings
+ *
+ * @root_qdisc:	pointer to the current root of the Qdisc hierarchy
+ * @qdiscs:	all qdiscs recorded by major part of the handle
  */
 struct nfp_abm_link {
 	struct nfp_abm *abm;
@@ -86,26 +207,65 @@ struct nfp_abm_link {
 	unsigned int id;
 	unsigned int queue_base;
 	unsigned int total_queues;
-	u32 parent;
-	unsigned int num_qdiscs;
-	struct nfp_red_qdisc *qdiscs;
+
+	u64 last_stats_update;
+
+	u32 *prio_map;
+	bool has_prio;
+
+	u8 def_band;
+	struct list_head dscp_map;
+
+	struct nfp_qdisc *root_qdisc;
+	struct radix_tree_root qdiscs;
 };
 
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
+{
+	return abm->num_bands > 1;
+}
+
+static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
+{
+	return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
+}
+
+static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
+{
+	return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+		       struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+			 struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+			struct tc_mq_qopt_offload *opt);
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+			  struct tc_gred_qopt_offload *opt);
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+			    struct tc_block_offload *opt);
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
 int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
-			   u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
-			    struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+			   unsigned int queue, u32 val);
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+			     enum nfp_abm_q_action act);
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+			   unsigned int queue, enum nfp_abm_q_action act);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
+			      unsigned int band, unsigned int queue,
 			      struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
-			     struct nfp_alink_xstats *xstats);
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+			       unsigned int band, unsigned int queue,
 			       struct nfp_alink_xstats *xstats);
 u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
 u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
 int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
 int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+void nfp_abm_prio_map_update(struct nfp_abm *abm);
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 0000000..2473fb5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+	return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+	return qdisc->children[id] &&
+	       qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+	return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+			struct nfp_alink_stats *child)
+{
+	parent->tx_pkts		+= child->tx_pkts;
+	parent->tx_bytes	+= child->tx_bytes;
+	parent->backlog_pkts	+= child->backlog_pkts;
+	parent->backlog_bytes	+= child->backlog_bytes;
+	parent->overlimits	+= child->overlimits;
+	parent->drops		+= child->drops;
+}
+
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+			 unsigned int queue)
+{
+	struct nfp_cpp *cpp = alink->abm->app->cpp;
+	unsigned int i;
+	int err;
+
+	if (!qdisc->offloaded)
+		return;
+
+	for (i = 0; i < qdisc->red.num_bands; i++) {
+		err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
+						&qdisc->red.band[i].stats);
+		if (err)
+			nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
+				i, queue, err);
+
+		err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
+						 &qdisc->red.band[i].xstats);
+		if (err)
+			nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
+				i, queue, err);
+	}
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+	unsigned int i;
+
+	if (qdisc->type != NFP_QDISC_MQ)
+		return;
+
+	for (i = 0; i < alink->total_queues; i++)
+		if (nfp_abm_qdisc_child_valid(qdisc, i))
+			nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+	alink->last_stats_update = time_now;
+	if (alink->root_qdisc)
+		nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+	u64 now;
+
+	/* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+	 * of all their leaves, so we would read the same stat multiple times
+	 * for every dump.
+	 */
+	now = ktime_get();
+	if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+		return;
+
+	__nfp_abm_stats_update(alink, now);
+}
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+			      unsigned int start, unsigned int end)
+{
+	unsigned int i;
+
+	for (i = start; i < end; i++)
+		if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+			qdisc->children[i]->use_cnt--;
+			qdisc->children[i] = NULL;
+		}
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+	unsigned int i;
+
+	/* Don't complain when qdisc is getting unlinked */
+	if (qdisc->use_cnt)
+		nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+			 qdisc->handle);
+
+	if (!nfp_abm_qdisc_is_red(qdisc))
+		return;
+
+	for (i = 0; i < qdisc->red.num_bands; i++) {
+		qdisc->red.band[i].stats.backlog_pkts = 0;
+		qdisc->red.band[i].stats.backlog_bytes = 0;
+	}
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
+		     unsigned int queue, struct nfp_alink_stats *prev_stats,
+		     struct nfp_alink_xstats *prev_xstats)
+{
+	u64 backlog_pkts, backlog_bytes;
+	int err;
+
+	/* Don't touch the backlog; it can only be reset after it has
+	 * been reported back to the tc qdisc stats.
+	 */
+	backlog_pkts = prev_stats->backlog_pkts;
+	backlog_bytes = prev_stats->backlog_bytes;
+
+	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
+	if (err) {
+		nfp_err(alink->abm->app->cpp,
+			"RED stats init (%d, %d) failed with error %d\n",
+			band, queue, err);
+		return err;
+	}
+
+	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
+	if (err) {
+		nfp_err(alink->abm->app->cpp,
+			"RED xstats init (%d, %d) failed with error %d\n",
+			band, queue, err);
+		return err;
+	}
+
+	prev_stats->backlog_pkts = backlog_pkts;
+	prev_stats->backlog_bytes = backlog_bytes;
+	return 0;
+}
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+		   unsigned int queue)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < qdisc->red.num_bands; i++) {
+		err = __nfp_abm_stats_init(alink, i, queue,
+					   &qdisc->red.band[i].prev_stats,
+					   &qdisc->red.band[i].prev_xstats);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+			    unsigned int queue)
+{
+	bool good_red, good_gred;
+	unsigned int i;
+
+	good_red = qdisc->type == NFP_QDISC_RED &&
+		   qdisc->params_ok &&
+		   qdisc->use_cnt == 1 &&
+		   !alink->has_prio &&
+		   !qdisc->children[0];
+	good_gred = qdisc->type == NFP_QDISC_GRED &&
+		    qdisc->params_ok &&
+		    qdisc->use_cnt == 1;
+	qdisc->offload_mark = good_red || good_gred;
+
+	/* If we are starting offload, init prev_stats */
+	if (qdisc->offload_mark && !qdisc->offloaded)
+		if (nfp_abm_stats_init(alink, qdisc, queue))
+			qdisc->offload_mark = false;
+
+	if (!qdisc->offload_mark)
+		return;
+
+	for (i = 0; i < alink->abm->num_bands; i++) {
+		enum nfp_abm_q_action act;
+
+		nfp_abm_ctrl_set_q_lvl(alink, i, queue,
+				       qdisc->red.band[i].threshold);
+		act = qdisc->red.band[i].ecn ?
+			NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
+		nfp_abm_ctrl_set_q_act(alink, i, queue, act);
+	}
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+	unsigned int i;
+
+	qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+	if (!qdisc->offload_mark)
+		return;
+
+	for (i = 0; i < alink->total_queues; i++) {
+		struct nfp_qdisc *child = qdisc->children[i];
+
+		if (!nfp_abm_qdisc_child_valid(qdisc, i))
+			continue;
+
+		nfp_abm_offload_compile_red(alink, child, i);
+	}
+}
+
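+/* Recompute the offload state of the whole hierarchy: mark offloadable
+ * RED/GRED instances starting from the root MQ, stop offload of anything
+ * no longer selected, and reset thresholds left unconfigured.
+ */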
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+	struct nfp_abm *abm = alink->abm;
+	struct radix_tree_iter iter;
+	struct nfp_qdisc *qdisc;
+	void __rcu **slot;
+	size_t i;
+
+	/* Mark all thresholds as unconfigured */
+	for (i = 0; i < abm->num_bands; i++)
+		__bitmap_set(abm->threshold_undef,
+			     i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
+			     alink->total_queues);
+
+	/* Clear offload marks */
+	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+		qdisc->offload_mark = false;
+	}
+
+	if (alink->root_qdisc)
+		nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+	/* Refresh offload status */
+	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+		if (!qdisc->offload_mark && qdisc->offloaded)
+			nfp_abm_qdisc_offload_stop(alink, qdisc);
+		qdisc->offloaded = qdisc->offload_mark;
+	}
+
+	/* Reset the unconfigured thresholds */
+	for (i = 0; i < abm->num_thresholds; i++)
+		if (test_bit(i, abm->threshold_undef))
+			__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+	__nfp_abm_stats_update(alink, ktime_get());
+}
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+		       struct nfp_qdisc *qdisc)
+{
+	struct radix_tree_iter iter;
+	unsigned int mq_refs = 0;
+	void __rcu **slot;
+
+	if (!qdisc->use_cnt)
+		return;
+	/* MQ doesn't notify well on destruction, so we need special
+	 * handling of MQ's children.
+	 */
+	if (qdisc->type == NFP_QDISC_MQ &&
+	    qdisc == alink->root_qdisc &&
+	    netdev->reg_state == NETREG_UNREGISTERING)
+		return;
+
+	/* Count refs held by MQ instances and clear pointers */
+	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+		unsigned int i;
+
+		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+			continue;
+		for (i = 0; i < mq->num_children; i++)
+			if (mq->children[i] == qdisc) {
+				mq->children[i] = NULL;
+				mq_refs++;
+			}
+	}
+
+	WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+	     qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+		   struct nfp_qdisc *qdisc)
+{
+	struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+	if (!qdisc)
+		return;
+	nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+	WARN_ON(radix_tree_delete(&alink->qdiscs,
+				  TC_H_MAJ(qdisc->handle)) != qdisc);
+
+	kfree(qdisc->children);
+	kfree(qdisc);
+
+	port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+		    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+		    unsigned int children)
+{
+	struct nfp_port *port = nfp_port_from_netdev(netdev);
+	struct nfp_qdisc *qdisc;
+	int err;
+
+	qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+	if (!qdisc)
+		return NULL;
+
+	if (children) {
+		qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+		if (!qdisc->children)
+			goto err_free_qdisc;
+	}
+
+	qdisc->netdev = netdev;
+	qdisc->type = type;
+	qdisc->parent_handle = parent_handle;
+	qdisc->handle = handle;
+	qdisc->num_children = children;
+
+	err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+	if (err) {
+		nfp_err(alink->abm->app->cpp,
+			"Qdisc insertion into radix tree failed: %d\n", err);
+		goto err_free_child_tbl;
+	}
+
+	port->tc_offload_cnt++;
+	return qdisc;
+
+err_free_child_tbl:
+	kfree(qdisc->children);
+err_free_qdisc:
+	kfree(qdisc);
+	return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+	return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
+
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+		      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+		      unsigned int children, struct nfp_qdisc **qdisc)
+{
+	*qdisc = nfp_abm_qdisc_find(alink, handle);
+	if (*qdisc) {
+		if (WARN_ON((*qdisc)->type != type))
+			return -EINVAL;
+		return 1;
+	}
+
+	*qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+				     children);
+	return *qdisc ? 0 : -ENOMEM;
+}
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+		      u32 handle)
+{
+	struct nfp_qdisc *qdisc;
+
+	qdisc = nfp_abm_qdisc_find(alink, handle);
+	if (!qdisc)
+		return;
+
+	/* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+	if (alink->root_qdisc == qdisc)
+		qdisc->use_cnt--;
+
+	nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+	nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+	if (alink->root_qdisc == qdisc) {
+		alink->root_qdisc = NULL;
+		/* Only a root change matters here; other changes are acted
+		 * upon via the graft notification.
+		 */
+		nfp_abm_qdisc_offload_update(alink);
+	}
+}
+
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+		    unsigned int id)
+{
+	struct nfp_qdisc *parent, *child;
+
+	parent = nfp_abm_qdisc_find(alink, handle);
+	if (!parent)
+		return 0;
+
+	if (WARN(id >= parent->num_children,
+		 "graft child out of bound %d >= %d\n",
+		 id, parent->num_children))
+		return -EINVAL;
+
+	nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+	child = nfp_abm_qdisc_find(alink, child_handle);
+	if (child)
+		child->use_cnt++;
+	else
+		child = NFP_QDISC_UNTRACKED;
+	parent->children[id] = child;
+
+	nfp_abm_qdisc_offload_update(alink);
+
+	return 0;
+}
+
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+			struct nfp_alink_stats *old,
+			struct gnet_stats_basic_packed *bstats,
+			struct gnet_stats_queue *qstats)
+{
+	_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+		       new->tx_pkts - old->tx_pkts);
+	qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+	qstats->overlimits += new->overlimits - old->overlimits;
+	qstats->drops += new->drops - old->drops;
+}
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+			    struct nfp_alink_xstats *old,
+			    struct red_stats *stats)
+{
+	stats->forced_mark += new->ecn_marked - old->ecn_marked;
+	stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
+		   struct tc_gred_qopt_offload_stats *stats)
+{
+	struct nfp_qdisc *qdisc;
+	unsigned int i;
+
+	nfp_abm_stats_update(alink);
+
+	qdisc = nfp_abm_qdisc_find(alink, handle);
+	if (!qdisc)
+		return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently
+	 * offloaded.
+	 */
+
+	for (i = 0; i < qdisc->red.num_bands; i++) {
+		if (!stats->xstats[i])
+			continue;
+
+		nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
+					&qdisc->red.band[i].prev_stats,
+					&stats->bstats[i], &stats->qstats[i]);
+		qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
+
+		nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
+					    &qdisc->red.band[i].prev_xstats,
+					    stats->xstats[i]);
+		qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
+	}
+
+	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_gred_check_params(struct nfp_abm_link *alink,
+			  struct tc_gred_qopt_offload *opt)
+{
+	struct nfp_cpp *cpp = alink->abm->app->cpp;
+	struct nfp_abm *abm = alink->abm;
+	unsigned int i;
+
+	if (opt->set.grio_on || opt->set.wred_on) {
+		nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
+			 opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.dp_def != alink->def_band) {
+		nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
+			 alink->def_band, opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.dp_cnt != abm->num_bands) {
+		nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
+			 abm->num_bands, opt->parent, opt->handle);
+		return false;
+	}
+
+	for (i = 0; i < abm->num_bands; i++) {
+		struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
+
+		if (!band->present)
+			return false;
+		if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
+			nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
+				 opt->parent, opt->handle, i);
+			return false;
+		}
+		if (band->is_ecn && !nfp_abm_has_mark(abm)) {
+			nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
+				 opt->parent, opt->handle, i);
+			return false;
+		}
+		if (band->is_harddrop) {
+			nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
+				 opt->parent, opt->handle, i);
+			return false;
+		}
+		if (band->min != band->max) {
+			nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
+				 opt->parent, opt->handle, i);
+			return false;
+		}
+		if (band->min > S32_MAX) {
+			nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
+				 band->min, S32_MAX, opt->parent, opt->handle,
+				 i);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int
+nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+		     struct tc_gred_qopt_offload *opt)
+{
+	struct nfp_qdisc *qdisc;
+	unsigned int i;
+	int ret;
+
+	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
+				    opt->handle, 0, &qdisc);
+	if (ret < 0)
+		return ret;
+
+	qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
+	if (qdisc->params_ok) {
+		qdisc->red.num_bands = opt->set.dp_cnt;
+		for (i = 0; i < qdisc->red.num_bands; i++) {
+			qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
+			qdisc->red.band[i].threshold = opt->set.tab[i].min;
+		}
+	}
+
+	if (qdisc->use_cnt)
+		nfp_abm_qdisc_offload_update(alink);
+
+	return 0;
+}
+
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+			  struct tc_gred_qopt_offload *opt)
+{
+	switch (opt->command) {
+	case TC_GRED_REPLACE:
+		return nfp_abm_gred_replace(netdev, alink, opt);
+	case TC_GRED_DESTROY:
+		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+		return 0;
+	case TC_GRED_STATS:
+		return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+	struct nfp_qdisc *qdisc;
+
+	nfp_abm_stats_update(alink);
+
+	qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+	if (!qdisc || !qdisc->offloaded)
+		return -EOPNOTSUPP;
+
+	nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
+				    &qdisc->red.band[0].prev_xstats,
+				    opt->xstats);
+	qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
+	return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+		  struct tc_qopt_offload_stats *stats)
+{
+	struct nfp_qdisc *qdisc;
+
+	nfp_abm_stats_update(alink);
+
+	qdisc = nfp_abm_qdisc_find(alink, handle);
+	if (!qdisc)
+		return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently
+	 * offloaded.
+	 */
+
+	nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
+				&qdisc->red.band[0].prev_stats,
+				stats->bstats, stats->qstats);
+	qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
+
+	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+			 struct tc_red_qopt_offload *opt)
+{
+	struct nfp_cpp *cpp = alink->abm->app->cpp;
+	struct nfp_abm *abm = alink->abm;
+
+	if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
+		nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+			 opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
+		nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
+			 opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.is_harddrop) {
+		nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+			 opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.min != opt->set.max) {
+		nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+			 opt->parent, opt->handle);
+		return false;
+	}
+	if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+		nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+			 opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+			 opt->handle);
+		return false;
+	}
+
+	return true;
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+		    struct tc_red_qopt_offload *opt)
+{
+	struct nfp_qdisc *qdisc;
+	int ret;
+
+	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+				    opt->handle, 1, &qdisc);
+	if (ret < 0)
+		return ret;
+
+	/* If limit != 0, the child gets reset */
+	if (opt->set.limit) {
+		if (nfp_abm_qdisc_child_valid(qdisc, 0))
+			qdisc->children[0]->use_cnt--;
+		qdisc->children[0] = NULL;
+	} else {
+		/* Qdisc was just allocated; without a limit it will use
+		 * noop_qdisc, i.e. a black hole.
+		 */
+		if (!ret)
+			qdisc->children[0] = NFP_QDISC_UNTRACKED;
+	}
+
+	qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+	if (qdisc->params_ok) {
+		qdisc->red.num_bands = 1;
+		qdisc->red.band[0].ecn = opt->set.is_ecn;
+		qdisc->red.band[0].threshold = opt->set.min;
+	}
+
+	if (qdisc->use_cnt == 1)
+		nfp_abm_qdisc_offload_update(alink);
+
+	return 0;
+}
+
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+			 struct tc_red_qopt_offload *opt)
+{
+	switch (opt->command) {
+	case TC_RED_REPLACE:
+		return nfp_abm_red_replace(netdev, alink, opt);
+	case TC_RED_DESTROY:
+		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+		return 0;
+	case TC_RED_STATS:
+		return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+	case TC_RED_XSTATS:
+		return nfp_abm_red_xstats(alink, opt);
+	case TC_RED_GRAFT:
+		return nfp_abm_qdisc_graft(alink, opt->handle,
+					   opt->child_handle, 0);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+		  struct tc_mq_qopt_offload *opt)
+{
+	struct nfp_qdisc *qdisc;
+	int ret;
+
+	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+				    TC_H_ROOT, opt->handle, alink->total_queues,
+				    &qdisc);
+	if (ret < 0)
+		return ret;
+
+	qdisc->params_ok = true;
+	qdisc->offloaded = true;
+	nfp_abm_qdisc_offload_update(alink);
+	return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+		 struct tc_qopt_offload_stats *stats)
+{
+	struct nfp_qdisc *qdisc, *red;
+	unsigned int i, j;
+
+	qdisc = nfp_abm_qdisc_find(alink, handle);
+	if (!qdisc)
+		return -EOPNOTSUPP;
+
+	nfp_abm_stats_update(alink);
+
+	/* MQ stats are summed over the children in the core, so we need
+	 * to add up the unreported child values.
+	 */
+	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+	for (i = 0; i < qdisc->num_children; i++) {
+		if (!nfp_abm_qdisc_child_valid(qdisc, i))
+			continue;
+
+		if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+			continue;
+		red = qdisc->children[i];
+
+		for (j = 0; j < red->red.num_bands; j++) {
+			nfp_abm_stats_propagate(&qdisc->mq.stats,
+						&red->red.band[j].stats);
+			nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+						&red->red.band[j].prev_stats);
+		}
+	}
+
+	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+				stats->bstats, stats->qstats);
+
+	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+			struct tc_mq_qopt_offload *opt)
+{
+	switch (opt->command) {
+	case TC_MQ_CREATE:
+		return nfp_abm_mq_create(netdev, alink, opt);
+	case TC_MQ_DESTROY:
+		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+		return 0;
+	case TC_MQ_STATS:
+		return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+	case TC_MQ_GRAFT:
+		return nfp_abm_qdisc_graft(alink, opt->handle,
+					   opt->graft_params.child_handle,
+					   opt->graft_params.queue);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+		       struct tc_root_qopt_offload *opt)
+{
+	if (opt->ingress)
+		return -EOPNOTSUPP;
+	if (alink->root_qdisc)
+		alink->root_qdisc->use_cnt--;
+	alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+	if (alink->root_qdisc)
+		alink->root_qdisc->use_cnt++;
+
+	nfp_abm_qdisc_offload_update(alink);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 244dc26..8d54b36 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
-#include <net/geneve.h>
 #include <net/pkt_cls.h>
 #include <net/switchdev.h>
 #include <net/tc_act/tc_csum.h>
@@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
 	return act_size;
 }
 
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
-					 enum nfp_flower_tun_type tun_type)
-{
-	if (!out_dev->rtnl_link_ops)
-		return false;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
-		return tun_type == NFP_FL_TUNNEL_VXLAN;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
-		return tun_type == NFP_FL_TUNNEL_GENEVE;
-
-	return false;
-}
-
 static int
 nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 		/* Set action output parameters. */
 		output->flags = cpu_to_be16(tmp_flags);
 
-		/* Only offload if egress ports are on the same device as the
-		 * ingress port.
-		 */
-		if (!switchdev_port_same_parent_id(in_dev, out_dev))
-			return -EOPNOTSUPP;
+		if (nfp_netdev_is_nfp_repr(in_dev)) {
+			/* Confirm ingress and egress are on same device. */
+			if (!switchdev_port_same_parent_id(in_dev, out_dev))
+				return -EOPNOTSUPP;
+		}
+
 		if (!nfp_netdev_is_nfp_repr(out_dev))
 			return -EOPNOTSUPP;
 
@@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
 	return 0;
 }
 
+struct ipv4_ttl_word {
+	__u8	ttl;
+	__u8	protocol;
+	__sum16	check;
+};
+
 static int
 nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
-	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
+	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
+	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
 {
+	struct ipv4_ttl_word *ttl_word_mask;
+	struct ipv4_ttl_word *ttl_word;
+	struct iphdr *tos_word_mask;
+	struct iphdr *tos_word;
 	__be32 exact, mask;
 
 	/* We are expecting tcf_pedit to return a big endian value */
@@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 		set_ip_addr->ipv4_dst_mask |= mask;
 		set_ip_addr->ipv4_dst &= ~mask;
 		set_ip_addr->ipv4_dst |= exact & mask;
+		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+					   NFP_FL_LW_SIZ;
 		break;
 	case offsetof(struct iphdr, saddr):
 		set_ip_addr->ipv4_src_mask |= mask;
 		set_ip_addr->ipv4_src &= ~mask;
 		set_ip_addr->ipv4_src |= exact & mask;
+		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+					   NFP_FL_LW_SIZ;
+		break;
+	case offsetof(struct iphdr, ttl):
+		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+		ttl_word = (struct ipv4_ttl_word *)&exact;
+
+		if (ttl_word_mask->protocol || ttl_word_mask->check)
+			return -EOPNOTSUPP;
+
+		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+		set_ip_ttl_tos->head.jump_id =
+			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+					      NFP_FL_LW_SIZ;
+		break;
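+	/* pedit operates on 32-bit words, so the tos byte arrives in the
+	 * word also holding version/ihl/tot_len, which must not be masked.
+	 */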
+	case round_down(offsetof(struct iphdr, tos), 4):
+		tos_word_mask = (struct iphdr *)&mask;
+		tos_word = (struct iphdr *)&exact;
+
+		if (tos_word_mask->version || tos_word_mask->ihl ||
+		    tos_word_mask->tot_len)
+			return -EOPNOTSUPP;
+
+		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+		set_ip_ttl_tos->head.jump_id =
+			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+					      NFP_FL_LW_SIZ;
 		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	set_ip_addr->reserved = cpu_to_be16(0);
-	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
-	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
-
 	return 0;
 }
 
@@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
 	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
 }
 
+struct ipv6_hop_limit_word {
+	__be16 payload_len;
+	u8 nexthdr;
+	u8 hop_limit;
+};
+
+static int
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+{
+	struct ipv6_hop_limit_word *fl_hl_mask;
+	struct ipv6_hop_limit_word *fl_hl;
+
+	switch (off) {
+	case offsetof(struct ipv6hdr, payload_len):
+		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+		fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+			return -EOPNOTSUPP;
+
+		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+					    fl_hl_mask->hop_limit;
+		break;
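+	/* The flow label shares the first 32-bit word of the IPv6 header
+	 * with version and traffic class, hence the flow-label-only mask
+	 * check below.
+	 */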
+	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+		if (mask & ~IPV6_FLOW_LABEL_MASK ||
+		    exact & ~IPV6_FLOW_LABEL_MASK)
+			return -EOPNOTSUPP;
+
+		ip_hl_fl->ipv6_label_mask |= mask;
+		ip_hl_fl->ipv6_label &= ~mask;
+		ip_hl_fl->ipv6_label |= exact & mask;
+		break;
+	}
+
+	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+	return 0;
+}
+
 static int
 nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
 	       struct nfp_fl_set_ipv6_addr *ip_dst,
-	       struct nfp_fl_set_ipv6_addr *ip_src)
+	       struct nfp_fl_set_ipv6_addr *ip_src,
+	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
 {
 	__be32 exact, mask;
+	int err = 0;
 	u8 word;
 
 	/* We are expecting tcf_pedit to return a big endian value */
@@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
 		return -EOPNOTSUPP;
 
 	if (off < offsetof(struct ipv6hdr, saddr)) {
-		return -EOPNOTSUPP;
+		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+							  ip_hl_fl);
 	} else if (off < offsetof(struct ipv6hdr, daddr)) {
 		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
 		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
 		return -EOPNOTSUPP;
 	}
 
-	return 0;
+	return err;
 }
 
 static int
@@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 	     char *nfp_action, int *a_len, u32 *csum_updated)
 {
 	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
 	struct nfp_fl_set_ip4_addrs set_ip_addr;
 	struct nfp_fl_set_tport set_tport;
 	struct nfp_fl_set_eth set_eth;
@@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 	u32 offset, cmd;
 	u8 ip_proto = 0;
 
+	memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
+	memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
 	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
 	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
 	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
@@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
 			break;
 		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
-			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
+					     &set_ip_ttl_tos);
 			break;
 		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
 			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
-					     &set_ip6_src);
+					     &set_ip6_src, &set_ip6_tc_hl_fl);
 			break;
 		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
 			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
@@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		memcpy(nfp_action, &set_eth, act_size);
 		*a_len += act_size;
 	}
+	if (set_ip_ttl_tos.head.len_lw) {
+		nfp_action += act_size;
+		act_size = sizeof(set_ip_ttl_tos);
+		memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+		*a_len += act_size;
+
+		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+				nfp_fl_csum_l4_to_flag(ip_proto);
+	}
 	if (set_ip_addr.head.len_lw) {
 		nfp_action += act_size;
 		act_size = sizeof(set_ip_addr);
@@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
 		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
 				nfp_fl_csum_l4_to_flag(ip_proto);
 	}
+	if (set_ip6_tc_hl_fl.head.len_lw) {
+		nfp_action += act_size;
+		act_size = sizeof(set_ip6_tc_hl_fl);
+		memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+		*a_len += act_size;
+
+		/* Hardware will automatically fix TCP/UDP checksum. */
+		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+	}
 	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
 		/* TC compiles set src and dst IPv6 address as a single action,
 		 * the hardware requires this to be 2 separate actions.
@@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
 		*a_len += sizeof(struct nfp_fl_push_vlan);
 	} else if (is_tcf_tunnel_set(a)) {
 		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
-		struct nfp_repr *repr = netdev_priv(netdev);
 
-		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
 		if (*tun_type == NFP_FL_TUNNEL_NONE)
 			return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 29d673a..15f41cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <net/geneve.h>
+#include <net/vxlan.h>
 
 #include "../nfp_app.h"
 #include "../nfpcore/nfp_cpp.h"
@@ -65,8 +66,10 @@
 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL	6
 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET	7
 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS	9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS	10
 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC	11
 #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST	12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL	13
 #define NFP_FL_ACTION_OPCODE_SET_UDP		14
 #define NFP_FL_ACTION_OPCODE_SET_TCP		15
 #define NFP_FL_ACTION_OPCODE_PRE_LAG		16
@@ -82,6 +85,8 @@
 #define NFP_FL_PUSH_VLAN_CFI		BIT(12)
 #define NFP_FL_PUSH_VLAN_VID		GENMASK(11, 0)
 
+#define IPV6_FLOW_LABEL_MASK		cpu_to_be32(0x000fffff)
+
 /* LAG ports */
 #define NFP_FL_LAG_OUT			0xC0DE0000
 
@@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs {
 	__be32 ipv4_dst;
 };
 
+struct nfp_fl_set_ip4_ttl_tos {
+	struct nfp_fl_act_head head;
+	u8 ipv4_ttl_mask;
+	u8 ipv4_tos_mask;
+	u8 ipv4_ttl;
+	u8 ipv4_tos;
+	__be16 reserved;
+};
+
+struct nfp_fl_set_ipv6_tc_hl_fl {
+	struct nfp_fl_act_head head;
+	u8 ipv6_tc_mask;
+	u8 ipv6_hop_limit_mask;
+	__be16 reserved;
+	u8 ipv6_tc;
+	u8 ipv6_hop_limit;
+	__be32 ipv6_label_mask;
+	__be32 ipv6_label;
+};
+
 struct nfp_fl_set_ipv6_addr {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
@@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
 	return skb->len - NFP_FLOWER_CMSG_HLEN;
 }
 
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+			     enum nfp_flower_tun_type tun_type)
+{
+	if (netif_is_vxlan(netdev))
+		return tun_type == NFP_FL_TUNNEL_VXLAN;
+	if (netif_is_geneve(netdev))
+		return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+	return false;
+}
+
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+	if (!netdev->rtnl_link_ops)
+		return false;
+	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+		return true;
+	if (netif_is_vxlan(netdev))
+		return true;
+	if (netif_is_geneve(netdev))
+		return true;
+
+	return false;
+}
+
 struct sk_buff *
 nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
 void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 81dcf5b..5db838f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
 	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
 }
 
-static int
+static void
 nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
 				 struct net_device *master)
 {
 	struct nfp_fl_lag_group *group;
+	struct nfp_flower_priv *priv;
+
+	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+	if (!netif_is_bond_master(master))
+		return;
 
 	mutex_lock(&lag->lock);
 	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
 	if (!group) {
 		mutex_unlock(&lag->lock);
-		return -ENOENT;
+		nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
+			 netdev_name(master));
+		return;
 	}
 
 	group->to_remove = true;
@@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
 	mutex_unlock(&lag->lock);
 
 	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
-	return 0;
 }
 
 static int
@@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
 	return 0;
 }
 
-static int
+static void
 nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
 			  struct netdev_notifier_changelowerstate_info *info)
 {
@@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
 	unsigned long *flags;
 
 	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
-		return 0;
+		return;
 
 	lag_lower_info = info->lower_state_info;
 	if (!lag_lower_info)
-		return 0;
+		return;
 
 	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
 	repr = netdev_priv(netdev);
 
 	/* Verify that the repr is associated with this app. */
 	if (repr->app != priv->app)
-		return 0;
+		return;
 
 	repr_priv = repr->app_priv;
 	flags = &repr_priv->lag_port_flags;
@@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
 	mutex_unlock(&lag->lock);
 
 	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
-	return 0;
 }
 
-static int
-nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
-			void *ptr)
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+				struct net_device *netdev,
+				unsigned long event, void *ptr)
 {
-	struct net_device *netdev;
-	struct nfp_fl_lag *lag;
+	struct nfp_fl_lag *lag = &priv->nfp_lag;
 	int err;
 
-	netdev = netdev_notifier_info_to_dev(ptr);
-	lag = container_of(nb, struct nfp_fl_lag, lag_nb);
-
 	switch (event) {
 	case NETDEV_CHANGEUPPER:
 		err = nfp_fl_lag_changeupper_event(lag, ptr);
@@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
 			return NOTIFY_BAD;
 		return NOTIFY_OK;
 	case NETDEV_CHANGELOWERSTATE:
-		err = nfp_fl_lag_changels_event(lag, netdev, ptr);
-		if (err)
-			return NOTIFY_BAD;
+		nfp_fl_lag_changels_event(lag, netdev, ptr);
 		return NOTIFY_OK;
 	case NETDEV_UNREGISTER:
-		if (netif_is_bond_master(netdev)) {
-			err = nfp_fl_lag_schedule_group_delete(lag, netdev);
-			if (err)
-				return NOTIFY_BAD;
-			return NOTIFY_OK;
-		}
+		nfp_fl_lag_schedule_group_delete(lag, netdev);
+		return NOTIFY_OK;
 	}
 
 	return NOTIFY_DONE;
@@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
 
 	/* 0 is a reserved batch version so increment to first valid value. */
 	nfp_fl_increment_version(lag);
-
-	lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
 }
 
 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 3a54728..5059110 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
 }
 
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
-	return tc_setup_cb_egdev_register(netdev,
-					  nfp_flower_setup_tc_egress_cb,
-					  netdev_priv(netdev));
-}
-
 static void
 nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
 
 	kfree(repr->app_priv);
-
-	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
-				     netdev_priv(netdev));
 }
 
 static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
 		goto err_cleanup_metadata;
 	}
 
+	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
 	return 0;
 
 err_cleanup_metadata:
@@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app)
 		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
 		if (err)
 			return err;
-
-		err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
-		if (err)
-			return err;
 	}
 
 	return nfp_tunnel_config_start(app);
@@ -672,14 +659,29 @@ static int nfp_flower_start(struct nfp_app *app)
 
 static void nfp_flower_stop(struct nfp_app *app)
 {
-	struct nfp_flower_priv *app_priv = app->priv;
-
-	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
-		unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
-
 	nfp_tunnel_config_stop(app);
 }
 
+static int
+nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
+			unsigned long event, void *ptr)
+{
+	struct nfp_flower_priv *app_priv = app->priv;
+	int ret;
+
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
+		if (ret & NOTIFY_STOP_MASK)
+			return ret;
+	}
+
+	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+	if (ret & NOTIFY_STOP_MASK)
+		return ret;
+
+	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
+}
+
 const struct nfp_app_type app_flower = {
 	.id		= NFP_APP_FLOWER_NIC,
 	.name		= "flower",
@@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = {
 	.vnic_init	= nfp_flower_vnic_init,
 	.vnic_clean	= nfp_flower_vnic_clean,
 
-	.repr_init	= nfp_flower_repr_netdev_init,
 	.repr_preclean	= nfp_flower_repr_netdev_preclean,
 	.repr_clean	= nfp_flower_repr_netdev_clean,
 
@@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = {
 	.start		= nfp_flower_start,
 	.stop		= nfp_flower_stop,
 
+	.netdev_event	= nfp_flower_netdev_event,
+
 	.ctrl_msg_rx	= nfp_flower_cmsg_rx,
 
 	.sriov_enable	= nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 90045ba..b858bac 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
 struct net_device;
 struct nfp_app;
 
-#define NFP_FL_STATS_CTX_DONT_CARE	cpu_to_be32(0xffffffff)
 #define NFP_FL_STATS_ELEM_RS		FIELD_SIZEOF(struct nfp_fl_stats_id, \
 						     init_unalloc)
 #define NFP_FLOWER_MASK_ENTRY_RS	256
@@ -72,7 +71,6 @@ struct nfp_mtu_conf {
 
 /**
  * struct nfp_fl_lag - Flower APP priv data for link aggregation
- * @lag_nb:		Notifier to track master/slave events
  * @work:		Work queue for writing configs to the HW
  * @lock:		Lock to protect lag_group_list
  * @group_list:		List of all master/slave groups offloaded
@@ -85,7 +83,6 @@ struct nfp_mtu_conf {
  *			retransmission
  */
 struct nfp_fl_lag {
-	struct notifier_block lag_nb;
 	struct delayed_work work;
 	struct mutex lock;
 	struct list_head group_list;
@@ -126,13 +123,13 @@ struct nfp_fl_lag {
  * @nfp_neigh_off_lock:	Lock for the neighbour address list
  * @nfp_mac_off_ids:	IDA to manage id assignment for offloaded macs
  * @nfp_mac_off_count:	Number of MACs in address list
- * @nfp_tun_mac_nb:	Notifier to monitor link state
  * @nfp_tun_neigh_nb:	Notifier to monitor neighbour state
  * @reify_replies:	atomically stores the number of replies received
  *			from firmware for repr reify
  * @reify_wait_queue:	wait queue for repr reify response counting
  * @mtu_conf:		Configuration of repr MTU value
  * @nfp_lag:		Link aggregation data block
+ * @indr_block_cb_priv:	List of priv data passed to indirect block cbs
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -160,12 +157,12 @@ struct nfp_flower_priv {
 	spinlock_t nfp_neigh_off_lock;
 	struct ida nfp_mac_off_ids;
 	int nfp_mac_off_count;
-	struct notifier_block nfp_tun_mac_nb;
 	struct notifier_block nfp_tun_neigh_nb;
 	atomic_t reify_replies;
 	wait_queue_head_t reify_wait_queue;
 	struct nfp_mtu_conf mtu_conf;
 	struct nfp_fl_lag nfp_lag;
+	struct list_head indr_block_cb_priv;
 };
 
 /**
@@ -209,7 +206,6 @@ struct nfp_fl_payload {
 	char *unmasked_data;
 	char *mask_data;
 	char *action_data;
-	bool ingress_offload;
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
@@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
@@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
-			   struct net_device *netdev, __be32 host_ctx);
+			   struct net_device *netdev);
 struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
@@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
 
 int nfp_tunnel_config_start(struct nfp_app *app);
 void nfp_tunnel_config_stop(struct nfp_app *app);
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+				 struct net_device *netdev,
+				 unsigned long event, void *ptr);
 void nfp_tunnel_write_macs(struct nfp_app *app);
 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
-				  void *cb_priv);
 void nfp_flower_lag_init(struct nfp_fl_lag *lag);
 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
 int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+				struct net_device *netdev,
+				unsigned long event, void *ptr);
 bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
 				       struct net_device *master,
 				       struct nfp_fl_pre_lag *pre_act);
 int nfp_flower_lag_get_output_id(struct nfp_app *app,
 				 struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+				       struct net_device *netdev,
+				       unsigned long event);
 
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e54fb60..cdf7559 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 		return 0;
 	}
 
-	if (tun_type)
+	if (tun_type) {
 		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
-	else
+	} else {
+		if (!cmsg_port)
+			return -EOPNOTSUPP;
 		frame->in_port = cpu_to_be32(cmsg_port);
+	}
 
 	return 0;
 }
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
 	}
 }
 
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
 				  enum nfp_flower_tun_type tun_type)
 {
-	struct nfp_repr *netdev_repr;
+	u32 cmsg_port = 0;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
+	if (nfp_netdev_is_nfp_repr(netdev))
+		cmsg_port = nfp_repr_get_port_id(netdev);
+
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 
 	/* Populate Exact Port data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-				      nfp_repr_get_port_id(netdev),
-				      false, tun_type);
+				      cmsg_port, false, tun_type);
 	if (err)
 		return err;
 
 	/* Populate Mask Port Data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-				      nfp_repr_get_port_id(netdev),
-				      true, tun_type);
+				      cmsg_port, true, tun_type);
 	if (err)
 		return err;
 
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 		/* Configure tunnel end point MAC. */
-		if (nfp_netdev_is_nfp_repr(netdev)) {
-			netdev_repr = netdev_priv(netdev);
-			nfp_tunnel_write_macs(netdev_repr->app);
+		nfp_tunnel_write_macs(app);
 
-			/* Store the tunnel destination in the rule data.
-			 * This must be present and be an exact match.
-			 */
-			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
-		}
+		/* Store the tunnel destination in the rule data.
+		 * This must be present and be an exact match.
+		 */
+		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+		nfp_tunnel_add_ipv4_off(app, tun_dst);
 
 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
 			err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 48729bf..573a440 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
 struct nfp_fl_flow_table_cmp_arg {
 	struct net_device *netdev;
 	unsigned long cookie;
-	__be32 host_ctx;
 };
 
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 /* Must be called with either RTNL or rcu_read_lock */
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
-			   struct net_device *netdev, __be32 host_ctx)
+			   struct net_device *netdev)
 {
 	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
 	struct nfp_flower_priv *priv = app->priv;
 
 	flower_cmp_arg.netdev = netdev;
 	flower_cmp_arg.cookie = tc_flower_cookie;
-	flower_cmp_arg.host_ctx = host_ctx;
 
 	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
 				      nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+	nfp_flow->ingress_dev = netdev;
 
 	new_mask_id = 0;
 	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	priv->stats[stats_cxt].bytes = 0;
 	priv->stats[stats_cxt].used = jiffies;
 
-	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
-						 NFP_FL_STATS_CTX_DONT_CARE);
+	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (check_entry) {
 		if (nfp_release_stats_entry(app, stats_cxt))
 			return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
 	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
 	const struct nfp_fl_payload *flow_entry = obj;
 
-	if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
-	    (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
-	     flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+	if (flow_entry->ingress_dev == cmp_arg->netdev)
 		return flow_entry->tc_flower_cookie != cmp_arg->cookie;
 
 	return 1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c9542..545d941 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
 static int
-nfp_flower_xmit_flow(struct net_device *netdev,
-		     struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+		     u8 mtype)
 {
 	u32 meta_len, key_len, mask_len, act_len, tot_len;
-	struct nfp_repr *priv = netdev_priv(netdev);
 	struct sk_buff *skb;
 	unsigned char *msg;
 
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 
-	nfp_ctrl_tx(priv->app->ctrl, skb);
+	nfp_ctrl_tx(app->ctrl, skb);
 
 	return 0;
 }
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
 
 static int
 nfp_flower_calculate_key_layers(struct nfp_app *app,
+				struct net_device *netdev,
 				struct nfp_fl_key_ls *ret_key_ls,
 				struct tc_cls_flower_offload *flow,
-				bool egress,
 				enum nfp_flower_tun_type *tun_type)
 {
 	struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			skb_flow_dissector_target(flow->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
 						  flow->key);
-		if (!egress)
-			return -EOPNOTSUPP;
 
 		if (mask_enc_ctl->addr_type != 0xffff ||
 		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 		default:
 			return -EOPNOTSUPP;
 		}
-	} else if (egress) {
-		/* Reject non tunnel matches offloaded to egress repr. */
-		return -EOPNOTSUPP;
+
+		/* Ensure the ingress netdev matches the expected tun type. */
+		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+			return -EOPNOTSUPP;
 	}
 
 	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 
 static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_fl_payload *flow_pay;
 
@@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
 
 	flow_pay->nfp_tun_ipv4_addr = 0;
 	flow_pay->meta.flags = 0;
-	flow_pay->ingress_offload = !egress;
 
 	return flow_pay;
 
@@ -416,7 +413,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
  * @app:	Pointer to the APP handle
  * @netdev:	netdev structure.
  * @flow:	TC flower classifier offload structure.
- * @egress:	NFP netdev is the egress.
  *
  * Adds a new flow to the repeated hash structure and action payload.
  *
@@ -424,46 +420,35 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
  */
 static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
-		       struct tc_cls_flower_offload *flow, bool egress)
+		       struct tc_cls_flower_offload *flow)
 {
 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *flow_pay;
 	struct nfp_fl_key_ls *key_layer;
-	struct net_device *ingr_dev;
+	struct nfp_port *port = NULL;
 	int err;
 
-	ingr_dev = egress ? NULL : netdev;
-	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
-	if (flow_pay) {
-		/* Ignore as duplicate if it has been added by different cb. */
-		if (flow_pay->ingress_offload && egress)
-			return 0;
-		else
-			return -EOPNOTSUPP;
-	}
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
 
 	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
 	if (!key_layer)
 		return -ENOMEM;
 
-	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
 					      &tun_type);
 	if (err)
 		goto err_free_key_ls;
 
-	flow_pay = nfp_flower_allocate_new(key_layer, egress);
+	flow_pay = nfp_flower_allocate_new(key_layer);
 	if (!flow_pay) {
 		err = -ENOMEM;
 		goto err_free_key_ls;
 	}
 
-	flow_pay->ingress_dev = egress ? NULL : netdev;
-
-	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
-					    tun_type);
+	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+					    flow_pay, tun_type);
 	if (err)
 		goto err_destroy_flow;
 
@@ -471,12 +456,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_compile_flow_metadata(app, flow, flow_pay,
-					flow_pay->ingress_dev);
+	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_flower_xmit_flow(netdev, flow_pay,
+	err = nfp_flower_xmit_flow(app, flow_pay,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
 	if (err)
 		goto err_destroy_flow;
@@ -487,7 +471,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	port->tc_offload_cnt++;
+	if (port)
+		port->tc_offload_cnt++;
 
 	/* Deallocate flow payload when flower rule has been destroyed. */
 	kfree(key_layer);
@@ -509,7 +494,6 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
  * @app:	Pointer to the APP handle
  * @netdev:	netdev structure.
  * @flow:	TC flower classifier offload structure
- * @egress:	Netdev is the egress dev.
  *
  * Removes a flow from the repeated hash structure and clears the
  * action payload.
@@ -518,19 +502,19 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
  */
 static int
 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
-		       struct tc_cls_flower_offload *flow, bool egress)
+		       struct tc_cls_flower_offload *flow)
 {
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *nfp_flow;
-	struct net_device *ingr_dev;
+	struct nfp_port *port = NULL;
 	int err;
 
-	ingr_dev = egress ? NULL : netdev;
-	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
+	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (!nfp_flow)
-		return egress ? 0 : -ENOENT;
+		return -ENOENT;
 
 	err = nfp_modify_flow_metadata(app, nfp_flow);
 	if (err)
@@ -539,13 +523,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 	if (nfp_flow->nfp_tun_ipv4_addr)
 		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
 
-	err = nfp_flower_xmit_flow(netdev, nfp_flow,
+	err = nfp_flower_xmit_flow(app, nfp_flow,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
 	if (err)
 		goto err_free_flow;
 
 err_free_flow:
-	port->tc_offload_cnt--;
+	if (port)
+		port->tc_offload_cnt--;
 	kfree(nfp_flow->action_data);
 	kfree(nfp_flow->mask_data);
 	kfree(nfp_flow->unmasked_data);
@@ -561,7 +546,6 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
  * @app:	Pointer to the APP handle
  * @netdev:	Netdev structure.
  * @flow:	TC flower classifier offload structure
- * @egress:	Netdev is the egress dev.
  *
 * Populates a flow statistics structure which corresponds to a
  * specific flow.
@@ -570,22 +554,16 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
  */
 static int
 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
-		     struct tc_cls_flower_offload *flow, bool egress)
+		     struct tc_cls_flower_offload *flow)
 {
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *nfp_flow;
-	struct net_device *ingr_dev;
 	u32 ctx_id;
 
-	ingr_dev = egress ? NULL : netdev;
-	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
+	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (!nfp_flow)
 		return -EINVAL;
 
-	if (nfp_flow->ingress_offload && egress)
-		return 0;
-
 	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
 	spin_lock_bh(&priv->stats_lock);
@@ -602,35 +580,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 
 static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
-			struct tc_cls_flower_offload *flower, bool egress)
+			struct tc_cls_flower_offload *flower)
 {
 	if (!eth_proto_is_802_3(flower->common.protocol))
 		return -EOPNOTSUPP;
 
 	switch (flower->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return nfp_flower_add_offload(app, netdev, flower, egress);
+		return nfp_flower_add_offload(app, netdev, flower);
 	case TC_CLSFLOWER_DESTROY:
-		return nfp_flower_del_offload(app, netdev, flower, egress);
+		return nfp_flower_del_offload(app, netdev, flower);
 	case TC_CLSFLOWER_STATS:
-		return nfp_flower_get_stats(app, netdev, flower, egress);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
-				  void *cb_priv)
-{
-	struct nfp_repr *repr = cb_priv;
-
-	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
-		return -EOPNOTSUPP;
-
-	switch (type) {
-	case TC_SETUP_CLSFLOWER:
-		return nfp_flower_repr_offload(repr->app, repr->netdev,
-					       type_data, true);
+		return nfp_flower_get_stats(app, netdev, flower);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -647,7 +608,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
 		return nfp_flower_repr_offload(repr->app, repr->netdev,
-					       type_data, false);
+					       type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -686,3 +647,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 }
+
+struct nfp_flower_indr_block_cb_priv {
+	struct net_device *netdev;
+	struct nfp_app *app;
+	struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+				     struct net_device *netdev)
+{
+	struct nfp_flower_indr_block_cb_priv *cb_priv;
+	struct nfp_flower_priv *priv = app->priv;
+
+	/* All callback list access should be protected by RTNL. */
+	ASSERT_RTNL();
+
+	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+		if (cb_priv->netdev == netdev)
+			return cb_priv;
+
+	return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+					  void *type_data, void *cb_priv)
+{
+	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+	struct tc_cls_flower_offload *flower = type_data;
+
+	if (flower->common.chain_index)
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return nfp_flower_repr_offload(priv->app, priv->netdev,
+					       type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+			       struct tc_block_offload *f)
+{
+	struct nfp_flower_indr_block_cb_priv *cb_priv;
+	struct nfp_flower_priv *priv = app->priv;
+	int err;
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+		if (!cb_priv)
+			return -ENOMEM;
+
+		cb_priv->netdev = netdev;
+		cb_priv->app = app;
+		list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+		err = tcf_block_cb_register(f->block,
+					    nfp_flower_setup_indr_block_cb,
+					    netdev, cb_priv, f->extack);
+		if (err) {
+			list_del(&cb_priv->list);
+			kfree(cb_priv);
+		}
+
+		return err;
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block,
+					nfp_flower_setup_indr_block_cb, netdev);
+		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+		if (cb_priv) {
+			list_del(&cb_priv->list);
+			kfree(cb_priv);
+		}
+
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+			    enum tc_setup_type type, void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+						      type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+				       struct net_device *netdev,
+				       unsigned long event)
+{
+	int err;
+
+	if (!nfp_fl_is_netdev_to_offload(netdev))
+		return NOTIFY_OK;
+
+	if (event == NETDEV_REGISTER) {
+		err = __tc_indr_block_cb_register(netdev, app,
+						  nfp_flower_indr_setup_tc_cb,
+						  netdev);
+		if (err)
+			nfp_flower_cmsg_warn(app,
+					     "Indirect block reg failed - %s\n",
+					     netdev->name);
+	} else if (event == NETDEV_UNREGISTER) {
+		__tc_indr_block_cb_unregister(netdev,
+					      nfp_flower_indr_setup_tc_cb,
+					      netdev);
+	}
+
+	return NOTIFY_OK;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8e5bec0..2d9f26a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
 #include <linux/etherdevice.h>
 #include <linux/inetdevice.h>
 #include <net/netevent.h>
-#include <net/vxlan.h>
 #include <linux/idr.h>
 #include <net/dst_metadata.h>
 #include <net/arp.h>
@@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 	}
 }
 
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
-	if (!netdev->rtnl_link_ops)
-		return false;
-	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
-		return true;
-	if (netif_is_vxlan(netdev))
-		return true;
-
-	return false;
-}
-
 static int
 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
 			 gfp_t flag)
@@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
 
 	if (nfp_netdev_is_nfp_repr(netdev))
 		port = nfp_repr_get_port_id(netdev);
-	else if (!nfp_tun_is_netdev_to_offload(netdev))
+	else if (!nfp_fl_is_netdev_to_offload(netdev))
 		return;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
 	mutex_unlock(&priv->nfp_mac_off_lock);
 }
 
-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
-				     unsigned long event, void *ptr)
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+				 struct net_device *netdev,
+				 unsigned long event, void *ptr)
 {
-	struct nfp_flower_priv *app_priv;
-	struct net_device *netdev;
-	struct nfp_app *app;
-
 	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
-		app_priv = container_of(nb, struct nfp_flower_priv,
-					nfp_tun_mac_nb);
-		app = app_priv->app;
-		netdev = netdev_notifier_info_to_dev(ptr);
-
 		/* If non-nfp netdev then free its offload index. */
-		if (nfp_tun_is_netdev_to_offload(netdev))
+		if (nfp_fl_is_netdev_to_offload(netdev))
 			nfp_tun_del_mac_idx(app, netdev->ifindex);
 	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
 		   event == NETDEV_REGISTER) {
-		app_priv = container_of(nb, struct nfp_flower_priv,
-					nfp_tun_mac_nb);
-		app = app_priv->app;
-		netdev = netdev_notifier_info_to_dev(ptr);
-
 		nfp_tun_add_to_mac_offload_list(netdev, app);
 
 		/* Force a list write to keep NFP up to date. */
@@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
 int nfp_tunnel_config_start(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
-	struct net_device *netdev;
-	int err;
 
 	/* Initialise priv data for MAC offloading. */
 	priv->nfp_mac_off_count = 0;
 	mutex_init(&priv->nfp_mac_off_lock);
 	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
-	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
 	mutex_init(&priv->nfp_mac_index_lock);
 	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
 	ida_init(&priv->nfp_mac_off_ids);
@@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
 	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
 	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
 
-	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
-	if (err)
-		goto err_free_mac_ida;
-
-	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
-	if (err)
-		goto err_unreg_mac_nb;
-
-	/* Parse netdevs already registered for MACs that need offloaded. */
-	rtnl_lock();
-	for_each_netdev(&init_net, netdev)
-		nfp_tun_add_to_mac_offload_list(netdev, app);
-	rtnl_unlock();
-
-	return 0;
-
-err_unreg_mac_nb:
-	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
-err_free_mac_ida:
-	ida_destroy(&priv->nfp_mac_off_ids);
-	return err;
+	return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
 }
 
 void nfp_tunnel_config_stop(struct nfp_app *app)
@@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
 	struct nfp_ipv4_addr_entry *ip_entry;
 	struct list_head *ptr, *storage;
 
-	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
 	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
 
 	/* Free any memory that may be occupied by MAC list. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 68a0991..4a1b8f7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -136,6 +136,53 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
 	return old;
 }
 
+static int
+nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct net_device *netdev;
+	struct nfp_app *app;
+
+	netdev = netdev_notifier_info_to_dev(ptr);
+	app = container_of(nb, struct nfp_app, netdev_nb);
+
+	if (app->type->netdev_event)
+		return app->type->netdev_event(app, netdev, event, ptr);
+	return NOTIFY_DONE;
+}
+
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+	int err;
+
+	app->ctrl = ctrl;
+
+	if (app->type->start) {
+		err = app->type->start(app);
+		if (err)
+			return err;
+	}
+
+	app->netdev_nb.notifier_call = nfp_app_netdev_event;
+	err = register_netdevice_notifier(&app->netdev_nb);
+	if (err)
+		goto err_app_stop;
+
+	return 0;
+
+err_app_stop:
+	if (app->type->stop)
+		app->type->stop(app);
+	return err;
+}
+
+void nfp_app_stop(struct nfp_app *app)
+{
+	unregister_netdevice_notifier(&app->netdev_nb);
+
+	if (app->type->stop)
+		app->type->stop(app);
+}
+
 struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
 {
 	struct nfp_app *app;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4d6ecf9..d578d85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm;
  * @port_get_stats_strings:	get strings for extra statistics
  * @start:	start application logic
  * @stop:	stop application logic
+ * @netdev_event:	Netdevice notifier event
  * @ctrl_msg_rx:    control message handler
  * @ctrl_msg_rx_raw:	handler for control messages from data queues
  * @setup_tc:	setup TC ndo
@@ -122,6 +123,9 @@ struct nfp_app_type {
 	int (*start)(struct nfp_app *app);
 	void (*stop)(struct nfp_app *app);
 
+	int (*netdev_event)(struct nfp_app *app, struct net_device *netdev,
+			    unsigned long event, void *ptr);
+
 	void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
 	void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
 				unsigned int len);
@@ -151,6 +155,7 @@ struct nfp_app_type {
  * @reprs:	array of pointers to representors
  * @type:	pointer to const application ops and info
  * @ctrl_mtu:	MTU to set on the control vNIC (set in .init())
+ * @netdev_nb:	Netdevice notifier block
  * @priv:	app-specific priv data
  */
 struct nfp_app {
@@ -163,6 +168,9 @@ struct nfp_app {
 
 	const struct nfp_app_type *type;
 	unsigned int ctrl_mtu;
+
+	struct notifier_block netdev_nb;
+
 	void *priv;
 };
 
@@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
 	return app->type->repr_change_mtu(app, netdev, new_mtu);
 }
 
-static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
-{
-	app->ctrl = ctrl;
-	if (!app->type->start)
-		return 0;
-	return app->type->start(app);
-}
-
-static inline void nfp_app_stop(struct nfp_app *app)
-{
-	if (!app->type->stop)
-		return;
-	app->type->stop(app);
-}
-
 static inline const char *nfp_app_name(struct nfp_app *app)
 {
 	if (!app)
@@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
 
 struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
 void nfp_app_free(struct nfp_app *app);
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl);
+void nfp_app_stop(struct nfp_app *app);
 
 /* Callbacks shared between apps */
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6f0c37d..bb3dbd7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -851,7 +851,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
 			    void __iomem *ctrl_bar);
 
 struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
 	      unsigned int max_tx_rings, unsigned int max_rx_rings);
 void nfp_net_free(struct nfp_net *nn);
 
@@ -868,6 +868,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
 void nfp_net_rss_write_itbl(struct nfp_net *nn);
 void nfp_net_rss_write_key(struct nfp_net *nn);
 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
 
 unsigned int
 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcf..9aa6265 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -279,7 +279,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
  *
  * Return: Negative errno on error, 0 on success
  */
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
 {
 	u32 mbox = nn->tlv_caps.mbox_off;
 	int ret;
@@ -890,8 +890,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		u64_stats_update_end(&r_vec->tx_sync);
 	}
 
-	netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
 	skb_tx_timestamp(skb);
 
 	tx_ring->wr_p += nr_frags + 1;
@@ -899,7 +897,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		nfp_net_tx_ring_stop(nd_q, tx_ring);
 
 	tx_ring->wr_ptr_add += nr_frags + 1;
-	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
 		nfp_net_tx_xmit_more_flush(tx_ring);
 
 	return NETDEV_TX_OK;
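
The hunk above folds the separate netdev_tx_sent_queue() call and the
skb->xmit_more / netif_xmit_stopped() doorbell check into a single
__netdev_tx_sent_queue() call. As a reading aid only, the sketch below shows
the contract that helper is assumed to provide (BQL bytes are accounted and
the return value says whether the ring should be kicked now); the CONFIG_BQL
conditionals of the real helper are omitted and the name is prefixed to make
clear this is not the in-tree implementation.

#include <linux/netdevice.h>
#include <linux/dynamic_queue_limits.h>

static inline bool sketch_netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					       unsigned int bytes,
					       bool xmit_more)
{
	if (likely(!xmit_more)) {
		/* Last packet of the burst: account bytes (may stop the
		 * queue) and tell the caller to ring the doorbell.
		 */
		netdev_tx_sent_queue(dev_queue, bytes);
		return true;
	}

	/* More packets will follow; only ask for a kick if BQL has
	 * stopped the queue in the meantime.
	 */
	dql_queued(&dev_queue->dql, bytes);
	return netif_tx_queue_stopped(dev_queue);
}
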
@@ -3560,6 +3558,7 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @ctrl_bar:     PCI IOMEM with vNIC config memory
  * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3569,12 @@ void nfp_net_info(struct nfp_net *nn)
  *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
-			      unsigned int max_tx_rings,
-			      unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+	      unsigned int max_tx_rings, unsigned int max_rx_rings)
 {
 	struct nfp_net *nn;
+	int err;
 
 	if (needs_netdev) {
 		struct net_device *netdev;
@@ -3594,6 +3594,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 	}
 
 	nn->dp.dev = &pdev->dev;
+	nn->dp.ctrl_bar = ctrl_bar;
 	nn->pdev = pdev;
 
 	nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3617,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 
 	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
 
+	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+				     &nn->tlv_caps);
+	if (err)
+		goto err_free_nn;
+
 	return nn;
+
+err_free_nn:
+	if (nn->dp.netdev)
+		free_netdev(nn->dp.netdev);
+	else
+		vfree(nn);
+	return ERR_PTR(err);
 }
 
 /**
@@ -3889,11 +3902,6 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}
 
-	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
-				     &nn->tlv_caps);
-	if (err)
-		return err;
-
 	if (nn->dp.netdev)
 		nfp_net_netdev_init(nn);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d7c8518..ccec699 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -397,6 +397,8 @@
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
 
+#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET	5
+
 /**
  * VLAN filtering using general use mailbox
  * %NFP_NET_CFG_VLAN_FILTER:		Base address of VLAN filter mailbox
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 1e7d204..08f5fdb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
 
 	/* Allocate and initialise the vNIC */
-	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+	nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+			   n_tx_rings, n_rx_rings);
 	if (IS_ERR(nn))
 		return nn;
 
 	nn->app = pf->app;
 	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
-	nn->dp.ctrl_bar = ctrl_bar;
 	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	nn->dp.is_vf = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d2c1e9e..1145849 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	rx_bar_off = NFP_PCIE_QUEUE(startq);
 
 	/* Allocate and initialise the netdev */
-	nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+	nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
 	if (IS_ERR(nn)) {
 		err = PTR_ERR(nn);
 		goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	vf->nn = nn;
 
 	nn->fw_ver = fw_ver;
-	nn->dp.ctrl_bar = ctrl_bar;
 	nn->dp.is_vf = 1;
 	nn->stride_tx = stride;
 	nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 25382f8..89d1739 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -280,7 +280,7 @@
 #define LPC_FCCR_MIRRORCOUNTERCURRENT(n)	((n) & 0xFFFF)
 
 /*
- * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
  * register definitions
  */
 #define LPC_RXFLTRW_ACCEPTUNICAST		(1 << 0)
@@ -291,7 +291,7 @@
 #define LPC_RXFLTRW_ACCEPTPERFECT		(1 << 5)
 
 /*
- * rxfliterctrl register definitions
+ * rxfilterctrl register definitions
  */
 #define LPC_RXFLTRWSTS_MAGICPACKETENWOL		(1 << 12)
 #define LPC_RXFLTRWSTS_RXFILTERENWOL		(1 << 13)
@@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev)
 
 	phy_set_max_speed(phydev, SPEED_100);
 
-	phydev->advertising = phydev->supported;
-
 	pldat->link = 0;
 	pldat->speed = 0;
 	pldat->duplex = -1;
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index a9f1bc0..1450f38 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"Transmit ring full",
 	"SPI errors",
 	"Write verify errors",
+	"Buffer available errors",
 };
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index d531050..97f9295 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca)
 
 	qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
 
+	if (available > QCASPI_HW_BUF_LEN) {
+		/* This can only happen due to interference on the SPI line.
+		 * So retry later ...
+		 */
+		qca->stats.buf_avail_err++;
+		return -1;
+	}
+
 	while (qca->txr.skb[qca->txr.head]) {
 		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
 
@@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca)
 	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
 		   available);
 
-	if (available == 0) {
+	if (available > QCASPI_HW_BUF_LEN) {
+		/* This can only happen due to interference on the SPI line.
+		 * So retry later ...
+		 */
+		qca->stats.buf_avail_err++;
+		return -1;
+	} else if (available == 0) {
 		netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
 		return -1;
 	}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 2d2c497..eb9af45 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -74,6 +74,7 @@ struct qcaspi_stats {
 	u64 ring_full;
 	u64 spi_err;
 	u64 write_verify_failed;
+	u64 buf_avail_err;
 };
 
 struct qcaspi {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1fd0168..12811f1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -56,13 +56,6 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_SLOTS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
-
-/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-#define TX_FRAGS_READY_FOR(tp,nr_frags) \
-	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
-
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
@@ -212,24 +205,24 @@ enum cfg_version {
 };
 
 static const struct pci_device_id rtl8169_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE,	0x8168), 0, 0, RTL_CFG_1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
-	{ PCI_VENDOR_ID_DLINK,			0x4300,
-		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
+	{ PCI_VDEVICE(REALTEK,	0x8129), RTL_CFG_0 },
+	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_2 },
+	{ PCI_VDEVICE(REALTEK,	0x8161), RTL_CFG_1 },
+	{ PCI_VDEVICE(REALTEK,	0x8167), RTL_CFG_0 },
+	{ PCI_VDEVICE(REALTEK,	0x8168), RTL_CFG_1 },
+	{ PCI_VDEVICE(NCUBE,	0x8168), RTL_CFG_1 },
+	{ PCI_VDEVICE(REALTEK,	0x8169), RTL_CFG_0 },
+	{ PCI_VENDOR_ID_DLINK,	0x4300,
+		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
+	{ PCI_VDEVICE(DLINK,	0x4300), RTL_CFG_0 },
+	{ PCI_VDEVICE(DLINK,	0x4302), RTL_CFG_0 },
+	{ PCI_VDEVICE(AT,	0xc107), RTL_CFG_0 },
+	{ PCI_VDEVICE(USR,	0x0116), RTL_CFG_0 },
 	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
 		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
 	{ 0x0001,				0x8168,
 		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
-	{0,},
+	{}
 };
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
@@ -603,7 +596,6 @@ struct RxDesc {
 struct ring_info {
 	struct sk_buff	*skb;
 	u32		len;
-	u8		__pad[sizeof(void *) - sizeof(u32)];
 };
 
 struct rtl8169_counters {
@@ -661,7 +653,7 @@ struct rtl8169_private {
 	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
 	u16 cp_cmd;
 
-	u16 event_slow;
+	u16 irq_mask;
 	const struct rtl_coalesce_info *coalesce_info;
 	struct clk *clk;
 
@@ -1102,23 +1094,6 @@ static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
 	return rtl_eri_read(tp, reg, ERIAR_OOB);
 }
 
-static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
-{
-	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
-	case RTL_GIGA_MAC_VER_28:
-	case RTL_GIGA_MAC_VER_31:
-		return r8168dp_ocp_read(tp, mask, reg);
-	case RTL_GIGA_MAC_VER_49:
-	case RTL_GIGA_MAC_VER_50:
-	case RTL_GIGA_MAC_VER_51:
-		return r8168ep_ocp_read(tp, mask, reg);
-	default:
-		BUG();
-		return ~0;
-	}
-}
-
 static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
 			      u32 data)
 {
@@ -1134,30 +1109,11 @@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
 		      data, ERIAR_OOB);
 }
 
-static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
-{
-	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
-	case RTL_GIGA_MAC_VER_28:
-	case RTL_GIGA_MAC_VER_31:
-		r8168dp_ocp_write(tp, mask, reg, data);
-		break;
-	case RTL_GIGA_MAC_VER_49:
-	case RTL_GIGA_MAC_VER_50:
-	case RTL_GIGA_MAC_VER_51:
-		r8168ep_ocp_write(tp, mask, reg, data);
-		break;
-	default:
-		BUG();
-		break;
-	}
-}
-
-static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
 {
 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
 
-	ocp_write(tp, 0x1, 0x30, 0x00000001);
+	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
 }
 
 #define OOB_CMD_RESET		0x00
@@ -1169,18 +1125,18 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
 }
 
-DECLARE_RTL_COND(rtl_ocp_read_cond)
+DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
 {
 	u16 reg;
 
 	reg = rtl8168_get_ocp_reg(tp);
 
-	return ocp_read(tp, 0x0f, reg) & 0x00000800;
+	return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
 }
 
 DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
 {
-	return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+	return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
 }
 
 DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1198,14 +1154,15 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
 
 static void rtl8168dp_driver_start(struct rtl8169_private *tp)
 {
-	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
-	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+	rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
 }
 
 static void rtl8168ep_driver_start(struct rtl8169_private *tp)
 {
-	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
-	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+	r8168ep_ocp_write(tp, 0x01, 0x30,
+			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
 	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
 }
 
@@ -1230,15 +1187,16 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
 
 static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
 {
-	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
-	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
+	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+	rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
 }
 
 static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
 {
 	rtl8168ep_stop_cmac(tp);
-	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
-	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+	r8168ep_ocp_write(tp, 0x01, 0x30,
+			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
 	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
 }
 
@@ -1265,12 +1223,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
 {
 	u16 reg = rtl8168_get_ocp_reg(tp);
 
-	return !!(ocp_read(tp, 0x0f, reg) & 0x00008000);
+	return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
 }
 
 static bool r8168ep_check_dash(struct rtl8169_private *tp)
 {
-	return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+	return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
 }
 
 static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1334,18 +1292,13 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
 	mmiowb();
 }
 
-static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
-{
-	RTL_W16(tp, IntrMask, bits);
-}
-
 #define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
 #define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
 #define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
 
-static void rtl_irq_enable_all(struct rtl8169_private *tp)
+static void rtl_irq_enable(struct rtl8169_private *tp)
 {
-	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+	RTL_W16(tp, IntrMask, tp->irq_mask);
 }
 
 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
@@ -2051,8 +2004,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
-static void rtl8169_get_mac_version(struct rtl8169_private *tp,
-				    u8 default_version)
+static void rtl8169_get_mac_version(struct rtl8169_private *tp)
 {
 	/*
 	 * The driver currently handles the 8168Bf and the 8168Be identically
@@ -2066,120 +2018,107 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
 	 */
 	static const struct rtl_mac_info {
-		u32 mask;
-		u32 val;
-		int mac_version;
+		u16 mask;
+		u16 val;
+		u16 mac_version;
 	} mac_info[] = {
 		/* 8168EP family. */
-		{ 0x7cf00000, 0x50200000,	RTL_GIGA_MAC_VER_51 },
-		{ 0x7cf00000, 0x50100000,	RTL_GIGA_MAC_VER_50 },
-		{ 0x7cf00000, 0x50000000,	RTL_GIGA_MAC_VER_49 },
+		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
+		{ 0x7cf, 0x501,	RTL_GIGA_MAC_VER_50 },
+		{ 0x7cf, 0x500,	RTL_GIGA_MAC_VER_49 },
 
 		/* 8168H family. */
-		{ 0x7cf00000, 0x54100000,	RTL_GIGA_MAC_VER_46 },
-		{ 0x7cf00000, 0x54000000,	RTL_GIGA_MAC_VER_45 },
+		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
+		{ 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },
 
 		/* 8168G family. */
-		{ 0x7cf00000, 0x5c800000,	RTL_GIGA_MAC_VER_44 },
-		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
-		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
-		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },
+		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
+		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
+		{ 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
+		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },
 
 		/* 8168F family. */
-		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
-		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
-		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },
+		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
+		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
+		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
 
 		/* 8168E family. */
-		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
-		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
-		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },
+		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
+		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
+		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },
 
 		/* 8168D family. */
-		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
-		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },
+		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
+		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },
 
 		/* 8168DP family. */
-		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
-		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
-		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },
+		{ 0x7cf, 0x288,	RTL_GIGA_MAC_VER_27 },
+		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
+		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },
 
 		/* 8168C family. */
-		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
-		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
-		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
-		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
-		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
-		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
-		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },
+		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
+		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
+		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
+		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
+		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
+		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
+		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },
 
 		/* 8168B family. */
-		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
-		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
-		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },
+		{ 0x7cf, 0x380,	RTL_GIGA_MAC_VER_12 },
+		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
+		{ 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },
 
 		/* 8101 family. */
-		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
-		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
-		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
-		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
-		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
-		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
-		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
-		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
-		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
-		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
-		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
-		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
-		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
-		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
+		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
+		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
+		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
+		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
+		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
+		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
+		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
+		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
+		{ 0x7cf, 0x340,	RTL_GIGA_MAC_VER_13 },
+		{ 0x7cf, 0x343,	RTL_GIGA_MAC_VER_10 },
+		{ 0x7cf, 0x342,	RTL_GIGA_MAC_VER_16 },
+		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
+		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
+		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_16 },
 		/* FIXME: where did these entries come from ? -- FR */
-		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
-		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },
+		{ 0xfc8, 0x388,	RTL_GIGA_MAC_VER_15 },
+		{ 0xfc8, 0x308,	RTL_GIGA_MAC_VER_14 },
 
 		/* 8110 family. */
-		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
-		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
-		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
-		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
-		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
-		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },
+		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
+		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
+		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
+		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
+		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },
+		{ 0xfc8, 0x000,	RTL_GIGA_MAC_VER_01 },
 
 		/* Catch-all */
-		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
+		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
 	};
 	const struct rtl_mac_info *p = mac_info;
-	u32 reg;
+	u16 reg = RTL_R32(tp, TxConfig) >> 20;
 
-	reg = RTL_R32(tp, TxConfig);
 	while ((reg & p->mask) != p->val)
 		p++;
 	tp->mac_version = p->mac_version;
 
 	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
-		dev_notice(tp_to_dev(tp),
-			   "unknown MAC, using family default\n");
-		tp->mac_version = default_version;
-	} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
-		tp->mac_version = tp->supports_gmii ?
-				  RTL_GIGA_MAC_VER_42 :
-				  RTL_GIGA_MAC_VER_43;
-	} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
-		tp->mac_version = tp->supports_gmii ?
-				  RTL_GIGA_MAC_VER_45 :
-				  RTL_GIGA_MAC_VER_47;
-	} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
-		tp->mac_version = tp->supports_gmii ?
-				  RTL_GIGA_MAC_VER_46 :
-				  RTL_GIGA_MAC_VER_48;
+		dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
+	} else if (!tp->supports_gmii) {
+		if (tp->mac_version == RTL_GIGA_MAC_VER_42)
+			tp->mac_version = RTL_GIGA_MAC_VER_43;
+		else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
+			tp->mac_version = RTL_GIGA_MAC_VER_47;
+		else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
+			tp->mac_version = RTL_GIGA_MAC_VER_48;
 	}
 }
 
-static void rtl8169_print_mac_version(struct rtl8169_private *tp)
-{
-	netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
-}
-
 struct phy_reg {
 	u16 reg;
 	u16 val;
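The rewritten mac_info table above matches only bits 31:20 of TxConfig, so the 32-bit masks such as 0x7cf00000 and 0xfc800000 shrink to 0x7cf and 0xfc8, and the register value is pre-shifted by 20 before the walk. A small stand-alone check of that equivalence (the sample TxConfig value is illustrative only):

/* Stand-alone sanity check of the mask narrowing; not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example TxConfig whose upper bits select the VER_51 table entry. */
	uint32_t txconfig = 0x50200000;

	uint32_t old_match = txconfig & 0x7cf00000;	/* old 32-bit mask */
	uint16_t reg = txconfig >> 20;			/* new pre-shifted value */
	uint16_t new_match = reg & 0x7cf;		/* new 12-bit mask */

	assert((old_match >> 20) == new_match);
	printf("XID bits: %03x\n", new_match);		/* prints 502 */
	return 0;
}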
@@ -3902,8 +3841,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	rtl8169_print_mac_version(tp);
-
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_01:
 		break;
@@ -4643,7 +4580,7 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
 	rtl_set_rx_mode(tp->dev);
 	/* no early-rx interrupts */
 	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
-	rtl_irq_enable_all(tp);
+	rtl_irq_enable(tp);
 }
 
 static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5394,8 +5331,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
 
 	/* Work around for RxFIFO overflow. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
-		tp->event_slow |= RxFIFOOver | PCSTimeout;
-		tp->event_slow &= ~RxOverflow;
+		tp->irq_mask |= RxFIFOOver;
+		tp->irq_mask &= ~RxOverflow;
 	}
 
 	switch (tp->mac_version) {
@@ -5632,7 +5569,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 static void rtl_hw_start_8101(struct rtl8169_private *tp)
 {
 	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
-		tp->event_slow &= ~RxFIFOOver;
+		tp->irq_mask &= ~RxFIFOOver;
 
 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_16)
@@ -5888,6 +5825,16 @@ static void rtl8169_tx_timeout(struct net_device *dev)
 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
+static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+{
+	u32 status = opts0 | len;
+
+	if (entry == NUM_TX_DESC - 1)
+		status |= RingEnd;
+
+	return cpu_to_le32(status);
+}
+
 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 			      u32 *opts)
 {
@@ -5900,7 +5847,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
 		const skb_frag_t *frag = info->frags + cur_frag;
 		dma_addr_t mapping;
-		u32 status, len;
+		u32 len;
 		void *addr;
 
 		entry = (entry + 1) % NUM_TX_DESC;
@@ -5916,11 +5863,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 			goto err_out;
 		}
 
-		/* Anti gcc 2.95.3 bugware (sic) */
-		status = opts[0] | len |
-			(RingEnd * !((entry + 1) % NUM_TX_DESC));
-
-		txd->opts1 = cpu_to_le32(status);
+		txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
 		txd->opts2 = cpu_to_le32(opts[1]);
 		txd->addr = cpu_to_le64(mapping);
 
@@ -6108,6 +6051,15 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 	return true;
 }
 
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
+			       unsigned int nr_frags)
+{
+	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+
+	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+	return slots_avail > nr_frags;
+}
+
 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
@@ -6116,11 +6068,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct TxDesc *txd = tp->TxDescArray + entry;
 	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
-	u32 status, len;
-	u32 opts[2];
+	u32 opts[2], len;
 	int frags;
 
-	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
+	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@@ -6166,9 +6117,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	/* Force memory writes to complete before releasing descriptor */
 	dma_wmb();
 
-	/* Anti gcc 2.95.3 bugware (sic) */
-	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
-	txd->opts1 = cpu_to_le32(status);
+	txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
 
 	/* Force all memory writes to complete before notifying device */
 	wmb();
@@ -6179,7 +6128,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	mmiowb();
 
-	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -6193,7 +6142,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb();
-		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
+		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
 			netif_wake_queue(dev);
 	}
 
@@ -6257,7 +6206,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
-static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+		   int budget)
 {
 	unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
 
@@ -6285,7 +6235,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		if (status & LastFrag) {
 			pkts_compl++;
 			bytes_compl += tx_skb->skb->len;
-			dev_consume_skb_any(tx_skb->skb);
+			napi_consume_skb(tx_skb->skb, budget);
 			tx_skb->skb = NULL;
 		}
 		dirty_tx++;
@@ -6310,7 +6260,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		 */
 		smp_mb();
 		if (netif_queue_stopped(dev) &&
-		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
 		/*
@@ -6461,7 +6411,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	struct rtl8169_private *tp = dev_instance;
 	u16 status = rtl_get_events(tp);
 
-	if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+	if (status == 0xffff || !(status & tp->irq_mask))
 		return IRQ_NONE;
 
 	if (unlikely(status & SYSErr)) {
@@ -6528,12 +6478,12 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
 	work_done = rtl_rx(dev, tp, (u32) budget);
 
-	rtl_tx(dev, tp);
+	rtl_tx(dev, tp, budget);
 
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 
-		rtl_irq_enable_all(tp);
+		rtl_irq_enable(tp);
 		mmiowb();
 	}
 
@@ -6584,7 +6534,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
 		phy_set_max_speed(phydev, SPEED_100);
 
 	/* Ensure to advertise everything, incl. pause */
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	phy_attached_info(phydev);
 
@@ -6824,8 +6774,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
 static int rtl8169_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
 	rtl8169_net_suspend(dev);
@@ -6855,8 +6804,7 @@ static void __rtl8169_resume(struct net_device *dev)
 
 static int rtl8169_resume(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
 	clk_prepare_enable(tp->clk);
@@ -6869,8 +6817,7 @@ static int rtl8169_resume(struct device *device)
 
 static int rtl8169_runtime_suspend(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
 	if (!tp->TxDescArray)
@@ -6891,8 +6838,7 @@ static int rtl8169_runtime_suspend(struct device *device)
 
 static int rtl8169_runtime_resume(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct rtl8169_private *tp = netdev_priv(dev);
 	rtl_rar_set(tp, dev->dev_addr);
 
@@ -6910,8 +6856,7 @@ static int rtl8169_runtime_resume(struct device *device)
 
 static int rtl8169_runtime_idle(struct device *device)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 
 	if (!netif_running(dev) || !netif_carrier_ok(dev))
 		pm_schedule_suspend(device, 10000);
@@ -7023,31 +6968,26 @@ static const struct net_device_ops rtl_netdev_ops = {
 
 static const struct rtl_cfg_info {
 	void (*hw_start)(struct rtl8169_private *tp);
-	u16 event_slow;
+	u16 irq_mask;
 	unsigned int has_gmii:1;
 	const struct rtl_coalesce_info *coalesce_info;
-	u8 default_ver;
 } rtl_cfg_infos [] = {
 	[RTL_CFG_0] = {
 		.hw_start	= rtl_hw_start_8169,
-		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+		.irq_mask	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
 		.has_gmii	= 1,
 		.coalesce_info	= rtl_coalesce_info_8169,
-		.default_ver	= RTL_GIGA_MAC_VER_01,
 	},
 	[RTL_CFG_1] = {
 		.hw_start	= rtl_hw_start_8168,
-		.event_slow	= SYSErr | LinkChg | RxOverflow,
+		.irq_mask	= LinkChg | RxOverflow,
 		.has_gmii	= 1,
 		.coalesce_info	= rtl_coalesce_info_8168_8136,
-		.default_ver	= RTL_GIGA_MAC_VER_11,
 	},
 	[RTL_CFG_2] = {
 		.hw_start	= rtl_hw_start_8101,
-		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
-				  PCSTimeout,
+		.irq_mask	= LinkChg | RxOverflow | RxFIFOOver,
 		.coalesce_info	= rtl_coalesce_info_8168_8136,
-		.default_ver	= RTL_GIGA_MAC_VER_13,
 	}
 };
 
@@ -7309,11 +7249,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	tp->mmio_addr = pcim_iomap_table(pdev)[region];
 
-	if (!pci_is_pcie(pdev))
-		dev_info(&pdev->dev, "not PCI Express\n");
-
 	/* Identify chip attached to board */
-	rtl8169_get_mac_version(tp, cfg->default_ver);
+	rtl8169_get_mac_version(tp);
+	if (tp->mac_version == RTL_GIGA_MAC_NONE)
+		return -ENODEV;
 
 	if (rtl_tbi_enabled(tp)) {
 		dev_err(&pdev->dev, "TBI fiber mode not supported\n");
@@ -7351,8 +7290,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rtl_init_mdio_ops(tp);
 	rtl_init_jumbo_ops(tp);
 
-	rtl8169_print_mac_version(tp);
-
 	chipset = tp->mac_version;
 
 	rc = rtl_alloc_irq(tp);
@@ -7426,7 +7363,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->max_mtu = jumbo_max;
 
 	tp->hw_start = cfg->hw_start;
-	tp->event_slow = cfg->event_slow;
+	tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
 	tp->coalesce_info = cfg->coalesce_info;
 
 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
@@ -7450,9 +7387,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_mdio_unregister;
 
-	netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
+	netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
 		   rtl_chip_infos[chipset].name, dev->dev_addr,
-		   (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
+		   (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
 		   pci_irq_vector(pdev, 0));
 
 	if (jumbo_max > JUMBO_1K)
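rtl_tx_slots_avail(), introduced in this diff in place of the removed TX_SLOTS_AVAIL/TX_FRAGS_READY_FOR macros, relies on cur_tx and dirty_tx being free-running unsigned counters: dirty_tx + NUM_TX_DESC - cur_tx stays correct even after the counters wrap. A stand-alone illustration of that property, assuming the driver's usual NUM_TX_DESC of 64 (the sample counter values are arbitrary):

/* Not driver code; demonstrates the wrap-safe free-slot arithmetic only. */
#include <assert.h>
#include <stdio.h>

#define NUM_TX_DESC 64U

static unsigned int slots_avail(unsigned int dirty_tx, unsigned int cur_tx)
{
	return dirty_tx + NUM_TX_DESC - cur_tx;
}

int main(void)
{
	/* 10 descriptors in flight, no wrap: 54 slots free. */
	assert(slots_avail(100, 110) == 54);

	/* Same situation with the counters wrapped past UINT_MAX: still 54. */
	assert(slots_avail(0xfffffffbU, 0x00000005U) == 54);

	/* An skb with nr_frags fragments needs nr_frags + 1 descriptors,
	 * hence the "slots_avail > nr_frags" test in the driver. */
	printf("ok\n");
	return 0;
}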
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1c6e4df..ac9195a 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1032,7 +1032,6 @@ struct ravb_private {
 	phy_interface_t phy_interface;
 	int msg_enable;
 	int speed;
-	int duplex;
 	int emac_irq;
 	enum ravb_chip_id chip_id;
 	int rx_irqs[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index defed0d..ffc1ada 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev)
 	return error;
 }
 
-static void ravb_set_duplex(struct net_device *ndev)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-
-	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
-}
-
 static void ravb_set_rate(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -406,13 +399,11 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 /* E-MAC init function */
 static void ravb_emac_init(struct net_device *ndev)
 {
-	struct ravb_private *priv = netdev_priv(ndev);
-
 	/* Receive frame limit set register */
 	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
 
 	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
-	ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
 		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
 		   ECMR_TE | ECMR_RE, ECMR);
 
@@ -995,12 +986,6 @@ static void ravb_adjust_link(struct net_device *ndev)
 		ravb_rcv_snd_disable(ndev);
 
 	if (phydev->link) {
-		if (phydev->duplex != priv->duplex) {
-			new_state = true;
-			priv->duplex = phydev->duplex;
-			ravb_set_duplex(ndev);
-		}
-
 		if (phydev->speed != priv->speed) {
 			new_state = true;
 			priv->speed = phydev->speed;
@@ -1015,7 +1000,6 @@ static void ravb_adjust_link(struct net_device *ndev)
 		new_state = true;
 		priv->link = 0;
 		priv->speed = 0;
-		priv->duplex = -1;
 	}
 
 	/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1045,7 +1029,6 @@ static int ravb_phy_init(struct net_device *ndev)
 
 	priv->link = 0;
 	priv->speed = 0;
-	priv->duplex = -1;
 
 	/* Try connecting to PHY */
 	pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1088,6 +1071,10 @@ static int ravb_phy_init(struct net_device *ndev)
 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
 
+	/* Half Duplex is not supported */
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
 	phy_attached_info(phydev);
 
 	return 0;
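With the half-duplex link modes stripped at connect time, ravb_adjust_link() can never see a half-duplex result, which is what lets ravb_emac_init() above set ECMR_DM unconditionally and drop the duplex tracking. A minimal sketch of the same restriction in a generic driver (the helper name is a placeholder, not from ravb):

/* Sketch: restrict an already-connected PHY to full-duplex modes only. */
static void my_restrict_to_full_duplex(struct phy_device *phydev)
{
	/* phy_remove_link_mode() clears the mode from the supported mask
	 * and re-derives the advertised modes accordingly. */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
}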
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index beb0662..6213827 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1632,9 +1632,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
 {
 	struct rocker_world_ops *wops = rocker_port->rocker->wops;
 
-	if (netif_is_bridge_master(vlan->obj.orig_dev))
-		return -EOPNOTSUPP;
-
 	if (!wops->port_obj_vlan_add)
 		return -EOPNOTSUPP;
 
@@ -2145,8 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev,
 static const struct switchdev_ops rocker_port_switchdev_ops = {
 	.switchdev_port_attr_get	= rocker_port_attr_get,
 	.switchdev_port_attr_set	= rocker_port_attr_set,
-	.switchdev_port_obj_add		= rocker_port_obj_add,
-	.switchdev_port_obj_del		= rocker_port_obj_del,
 };
 
 struct rocker_fib_event_work {
@@ -2812,12 +2807,54 @@ static int rocker_switchdev_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
+static int
+rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+			struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+	int err = -EOPNOTSUPP;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD:
+		err = rocker_port_obj_add(netdev, port_obj_info->obj,
+					  port_obj_info->trans);
+		break;
+	case SWITCHDEV_PORT_OBJ_DEL:
+		err = rocker_port_obj_del(netdev, port_obj_info->obj);
+		break;
+	}
+
+	port_obj_info->handled = true;
+	return notifier_from_errno(err);
+}
+
+static int rocker_switchdev_blocking_event(struct notifier_block *unused,
+					   unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+	if (!rocker_port_dev_check(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD:
+	case SWITCHDEV_PORT_OBJ_DEL:
+		return rocker_switchdev_port_obj_event(event, dev, ptr);
+	}
+
+	return NOTIFY_DONE;
+}
+
 static struct notifier_block rocker_switchdev_notifier = {
 	.notifier_call = rocker_switchdev_event,
 };
 
+static struct notifier_block rocker_switchdev_blocking_notifier = {
+	.notifier_call = rocker_switchdev_blocking_event,
+};
+
 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct notifier_block *nb;
 	struct rocker *rocker;
 	int err;
 
@@ -2933,6 +2970,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_register_switchdev_notifier;
 	}
 
+	nb = &rocker_switchdev_blocking_notifier;
+	err = register_switchdev_blocking_notifier(nb);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
+		goto err_register_switchdev_blocking_notifier;
+	}
+
 	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
 
 	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
@@ -2940,6 +2984,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
+err_register_switchdev_blocking_notifier:
+	unregister_switchdev_notifier(&rocker_switchdev_notifier);
 err_register_switchdev_notifier:
 	unregister_fib_notifier(&rocker->fib_nb);
 err_register_fib_notifier:
@@ -2971,6 +3017,10 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void rocker_remove(struct pci_dev *pdev)
 {
 	struct rocker *rocker = pci_get_drvdata(pdev);
+	struct notifier_block *nb;
+
+	nb = &rocker_switchdev_blocking_notifier;
+	unregister_switchdev_blocking_notifier(nb);
 
 	unregister_switchdev_notifier(&rocker_switchdev_notifier);
 	unregister_fib_notifier(&rocker->fib_nb);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7eeac3d..b6a5005 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
 	{ NVRAM_PARTITION_TYPE_LICENSE,		   0,    0, "sfc_license" },
 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
+	/* MUM and SUC firmware share the same partition type */
+	{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE,	   0,    0, "sfc_mumfw" },
+	{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	   0,    0, "sfc_uefi" },
+	{ NVRAM_PARTITION_TYPE_STATUS,		   0,    0, "sfc_status" }
 };
 
 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
@@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
 	part->common.mtd.flags = MTD_CAP_NORFLASH;
 	part->common.mtd.size = size;
 	part->common.mtd.erasesize = erase_size;
+	/* sfc_status is read-only */
+	if (!erase_size)
+		part->common.mtd.flags |= MTD_NO_ERASE;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c3ad564..22eb059 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
 		goto err;
 
-	/* Update BQL */
-	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
 	efx_tx_maybe_stop_queue(tx_queue);
 
 	/* Pass off to hardware */
-	if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
 		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
 
 		/* There could be packets left on the partner queue if those
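The efx change above folds the BQL accounting and the xmit_more doorbell decision into one call: __netdev_tx_sent_queue() returns true when the queue has to be flushed now, either because this skb ends the batch or because BQL just stopped the queue. A hedged sketch of the usual calling pattern in a driver xmit path (everything except the core helpers is schematic):

/* Sketch only; the surrounding driver bits are placeholders. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	unsigned int len = skb->len;
	bool xmit_more = skb->xmit_more;	/* netdev_xmit_more() in later kernels */

	/* ... map the skb and fill the hardware descriptors here ... */

	/* Accounts 'len' bytes to BQL and says whether to ring the doorbell:
	 * true when this skb ends the batch, or when BQL just stopped the
	 * queue and a flush is therefore mandatory. */
	if (__netdev_tx_sent_queue(txq, len, xmit_more)) {
		/* write the producer index / doorbell register here */
	}

	return NETDEV_TX_OK;
}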
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d9d0d03..bba9733 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -234,6 +234,9 @@
 
 #define DESC_NUM	256
 
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
 #define DESC_SZ	sizeof(struct netsec_de)
 
 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)
@@ -571,34 +574,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
 
 /************* NETDEV_OPS FOLLOW *************/
 
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
-					struct netsec_desc *desc)
-{
-	struct sk_buff *skb;
-
-	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
-		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
-	} else {
-		desc->len = L1_CACHE_ALIGN(desc->len);
-		skb = netdev_alloc_skb(priv->ndev, desc->len);
-	}
-	if (!skb)
-		return NULL;
-
-	desc->addr = skb->data;
-	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-	return skb;
-}
 
 static void netsec_set_rx_de(struct netsec_priv *priv,
 			     struct netsec_desc_ring *dring, u16 idx,
-			     const struct netsec_desc *desc,
-			     struct sk_buff *skb)
+			     const struct netsec_desc *desc)
 {
 	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
 	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,59 +596,6 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
 	dring->desc[idx].dma_addr = desc->dma_addr;
 	dring->desc[idx].addr = desc->addr;
 	dring->desc[idx].len = desc->len;
-	dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
-					struct netsec_desc_ring *dring,
-					u16 idx,
-					struct netsec_rx_pkt_info *rxpi,
-					struct netsec_desc *desc, u16 *len)
-{
-	struct netsec_de de = {};
-
-	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
-	*len = de.buf_len_info >> 16;
-
-	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
-	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
-	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
-							NETSEC_RX_PKT_ERR_MASK;
-	*desc = dring->desc[idx];
-	return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
-					      struct netsec_rx_pkt_info *rxpi,
-					      struct netsec_desc *desc,
-					      u16 *len)
-{
-	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct sk_buff *tmp_skb, *skb = NULL;
-	struct netsec_desc td;
-	int tail;
-
-	*rxpi = (struct netsec_rx_pkt_info){};
-
-	td.len = priv->ndev->mtu + 22;
-
-	tmp_skb = netsec_alloc_skb(priv, &td);
-
-	tail = dring->tail;
-
-	if (!tmp_skb) {
-		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
-				 dring->desc[tail].skb);
-	} else {
-		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
-		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
-	}
-
-	/* move tail ahead */
-	dring->tail = (dring->tail + 1) % DESC_NUM;
-
-	return skb;
 }
 
 static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
@@ -736,19 +662,65 @@ static int netsec_process_tx(struct netsec_priv *priv, int budget)
 	return done;
 }
 
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+				  dma_addr_t *dma_handle, u16 *desc_len)
+{
+	size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size_t payload_len = NETSEC_RX_BUF_SZ;
+	dma_addr_t mapping;
+	void *buf;
+
+	total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+	buf = napi_alloc_frag(total_len);
+	if (!buf)
+		return NULL;
+
+	mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, mapping)))
+		goto err_out;
+
+	*dma_handle = mapping;
+	*desc_len = payload_len;
+
+	return buf;
+
+err_out:
+	skb_free_frag(buf);
+	return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	u16 idx = from;
+
+	while (num) {
+		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+		idx++;
+		if (idx >= DESC_NUM)
+			idx = 0;
+		num--;
+	}
+}
+
 static int netsec_process_rx(struct netsec_priv *priv, int budget)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 	struct net_device *ndev = priv->ndev;
 	struct netsec_rx_pkt_info rx_info;
-	int done = 0;
-	struct netsec_desc desc;
 	struct sk_buff *skb;
-	u16 len;
+	int done = 0;
 
 	while (done < budget) {
 		u16 idx = dring->tail;
 		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+		struct netsec_desc *desc = &dring->desc[idx];
+		u16 pkt_len, desc_len;
+		dma_addr_t dma_handle;
+		void *buf_addr;
+		u32 truesize;
 
 		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
 			/* reading the register clears the irq */
@@ -762,18 +734,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		 */
 		dma_rmb();
 		done++;
-		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
-		if (unlikely(!skb) || rx_info.err_flag) {
+
+		pkt_len = de->buf_len_info >> 16;
+		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+			NETSEC_RX_PKT_ERR_MASK;
+		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+		if (rx_info.err_flag) {
 			netif_err(priv, drv, priv->ndev,
-				  "%s: rx fail err(%d)\n",
-				  __func__, rx_info.err_code);
+				  "%s: rx fail err(%d)\n", __func__,
+				  rx_info.err_code);
 			ndev->stats.rx_dropped++;
+			dring->tail = (dring->tail + 1) % DESC_NUM;
+			/* reuse buffer page frag */
+			netsec_rx_fill(priv, idx, 1);
 			continue;
 		}
+		rx_info.rx_cksum_result =
+			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
 
-		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
-				 DMA_FROM_DEVICE);
-		skb_put(skb, len);
+		/* allocate a fresh buffer and map it to the hardware.
+		 * This will eventually replace the old buffer in the hardware
+		 */
+		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+		if (unlikely(!buf_addr))
+			break;
+
+		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+					DMA_FROM_DEVICE);
+		prefetch(desc->addr);
+
+		truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		skb = build_skb(desc->addr, truesize);
+		if (unlikely(!skb)) {
+			/* free the newly allocated buffer, we are not going to
+			 * use it
+			 */
+			dma_unmap_single(priv->dev, dma_handle, desc_len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf_addr);
+			netif_err(priv, drv, priv->ndev,
+				  "rx failed to build skb\n");
+			break;
+		}
+		dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+		/* Update the descriptor with the new buffer we allocated */
+		desc->len = desc_len;
+		desc->dma_addr = dma_handle;
+		desc->addr = buf_addr;
+
+		skb_reserve(skb, NETSEC_SKB_PAD);
+		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, priv->ndev);
 
 		if (priv->rx_cksum_offload_flag &&
@@ -782,8 +795,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 
 		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
 			ndev->stats.rx_packets++;
-			ndev->stats.rx_bytes += len;
+			ndev->stats.rx_bytes += pkt_len;
 		}
+
+		netsec_rx_fill(priv, idx, 1);
+		dring->tail = (dring->tail + 1) % DESC_NUM;
 	}
 
 	return done;
@@ -946,7 +962,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
 		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
 				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
 							      DMA_TO_DEVICE);
-		dev_kfree_skb(desc->skb);
+		if (id == NETSEC_RING_RX)
+			skb_free_frag(desc->addr);
+		else if (id == NETSEC_RING_TX)
+			dev_kfree_skb(desc->skb);
 	}
 
 	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -977,47 +996,50 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
 static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[id];
-	int ret = 0;
 
 	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
 					   &dring->desc_dma, GFP_KERNEL);
-	if (!dring->vaddr) {
-		ret = -ENOMEM;
+	if (!dring->vaddr)
 		goto err;
-	}
 
 	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
-	if (!dring->desc) {
-		ret = -ENOMEM;
+	if (!dring->desc)
 		goto err;
-	}
 
 	return 0;
 err:
 	netsec_free_dring(priv, id);
 
-	return ret;
+	return -ENOMEM;
 }
 
 static int netsec_setup_rx_dring(struct netsec_priv *priv)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct netsec_desc desc;
-	struct sk_buff *skb;
-	int n;
+	int i;
 
-	desc.len = priv->ndev->mtu + 22;
+	for (i = 0; i < DESC_NUM; i++) {
+		struct netsec_desc *desc = &dring->desc[i];
+		dma_addr_t dma_handle;
+		void *buf;
+		u16 len;
 
-	for (n = 0; n < DESC_NUM; n++) {
-		skb = netsec_alloc_skb(priv, &desc);
-		if (!skb) {
+		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+		if (!buf) {
 			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
-			return -ENOMEM;
+			goto err_out;
 		}
-		netsec_set_rx_de(priv, dring, n, &desc, skb);
+		desc->dma_addr = dma_handle;
+		desc->addr = buf;
+		desc->len = len;
 	}
 
+	netsec_rx_fill(priv, 0, DESC_NUM);
+
 	return 0;
+
+err_out:
+	return -ENOMEM;
 }
 
 static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1399,8 @@ static int netsec_netdev_init(struct net_device *ndev)
 	int ret;
 	u16 data;
 
+	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
 	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
 	if (ret)
 		return ret;
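The netsec RX rewrite above replaces preallocated skbs with page fragments that are turned into skbs only after DMA completes. The sizing rule is the important part: reserve NET_SKB_PAD + NET_IP_ALIGN of headroom in front, leave room for struct skb_shared_info at the tail, and hand the same truesize to build_skb(). A condensed restatement of that pattern (MY_* names are placeholders, error paths trimmed):

/* Condensed sketch of the buffer layout used above; not netsec code. */
#define MY_RX_BUF_SZ	1536
#define MY_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#define MY_RX_TRUESIZE	(SKB_DATA_ALIGN(MY_RX_BUF_SZ + MY_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static void *my_alloc_rx_buf(struct device *dev, dma_addr_t *dma)
{
	void *buf = napi_alloc_frag(MY_RX_TRUESIZE);

	if (!buf)
		return NULL;

	/* Only the payload area is DMA-mapped; the headroom in front and
	 * the skb_shared_info tail stay CPU-only. */
	*dma = dma_map_single(dev, buf + MY_SKB_PAD, MY_RX_BUF_SZ,
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		skb_free_frag(buf);
		return NULL;
	}
	return buf;
}

static struct sk_buff *my_build_rx_skb(void *buf, unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(buf, MY_RX_TRUESIZE);

	if (!skb)
		return NULL;

	skb_reserve(skb, MY_SKB_PAD);	/* skip the headroom */
	skb_put(skb, pkt_len);		/* expose the received bytes */
	return skb;
}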
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6732f5c..9e7391f 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1117,7 +1117,7 @@ static void ave_phy_adjust_link(struct net_device *ndev)
 		if (phydev->asym_pause)
 			rmt_adv |= LPA_PAUSE_ASYM;
 
-		lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
 		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 		if (cap & FLOW_CTRL_TX)
 			txcr |= AVE_TXCR_FLOCTR;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864..d1f61c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev,
 		if (!adv_lp.pause)
 			return;
 	} else {
-		if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
-		    !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+		if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				       netdev->phydev->supported) ||
+		    !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				       netdev->phydev->supported))
 			return;
 	}
 
@@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
 		if (!adv_lp.pause)
 			return -EOPNOTSUPP;
 	} else {
-		if (!(phy->supported & SUPPORTED_Pause) ||
-		    !(phy->supported & SUPPORTED_Asym_Pause))
+		if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				       phy->supported) ||
+		    !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				       phy->supported))
 			return -EOPNOTSUPP;
 	}
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 500f7ed..e4aa030 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
 
 #define CTRL_V2_TS_BITS \
 	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
-	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
 
 #define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
 #define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
 #define CTRL_V3_TS_BITS \
 	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
 	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
-	 TS_LTYPE1_EN)
+	 TS_LTYPE1_EN | VLAN_LTYPE1_EN)
 
 #define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
 #define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
 	bool				mqprio_hw;
 	int				fifo_bw[CPSW_TC_NUM];
 	int				shp_cfg_speed;
+	int				tx_ts_enabled;
+	int				rx_ts_enabled;
 	u32 emac_port;
 	struct cpsw_common *cpsw;
 };
@@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
 				(func)(slave++, ##arg);			\
 	} while (0)
 
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+				    __be16 proto, u16 vid);
+
 static inline int cpsw_get_slave_port(u32 slave_num)
 {
 	return slave_num + 1;
 }
 
-static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
-{
-	struct cpsw_common *cpsw = priv->cpsw;
-
-	if (cpsw->data.dual_emac) {
-		struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
-
-		cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
-				   ALE_VLAN, slave->port_vlan, 0);
-		return;
-	}
-
-	cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
-}
-
 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 {
 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 
 			/* Clear all mcast from ALE */
 			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
-			__dev_mc_unsync(ndev, NULL);
+			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
 
 			/* Flood All Unicast Packets to Host port */
 			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 	}
 }
 
-static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
-{
-	struct cpsw_priv *priv = netdev_priv(ndev);
+struct addr_sync_ctx {
+	struct net_device *ndev;
+	const u8 *addr;		/* address to be synched */
+	int consumed;		/* number of address instances */
+	int flush;		/* flush flag */
+};
 
-	cpsw_add_mcast(priv, addr);
-	return 0;
-}
-
-static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
+/**
+ * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
+ * if it's not deleted
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+		       int vid, int add)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
-	int vid, flags;
+	int mask, flags, ret;
 
-	if (cpsw->data.dual_emac) {
-		vid = cpsw->slaves[priv->emac_port].port_vlan;
-		flags = ALE_VLAN;
-	} else {
-		vid = 0;
-		flags = 0;
+	if (vid < 0) {
+		if (cpsw->data.dual_emac)
+			vid = cpsw->slaves[priv->emac_port].port_vlan;
+		else
+			vid = 0;
 	}
 
-	cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+	flags = vid ? ALE_VLAN : 0;
+
+	if (add)
+		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+	else
+		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+	return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+	struct addr_sync_ctx *sync_ctx = ctx;
+	struct netdev_hw_addr *ha;
+	int found = 0, ret = 0;
+
+	if (!vdev || !(vdev->flags & IFF_UP))
+		return 0;
+
+	/* vlan address is relevant if its sync_cnt != 0 */
+	netdev_for_each_mc_addr(ha, vdev) {
+		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+			found = ha->sync_cnt;
+			break;
+		}
+	}
+
+	if (found)
+		sync_ctx->consumed++;
+
+	if (sync_ctx->flush) {
+		if (!found)
+			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+		return 0;
+	}
+
+	if (found)
+		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+	return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+	struct addr_sync_ctx sync_ctx;
+	int ret;
+
+	sync_ctx.consumed = 0;
+	sync_ctx.addr = addr;
+	sync_ctx.ndev = ndev;
+	sync_ctx.flush = 0;
+
+	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+	if (sync_ctx.consumed < num && !ret)
+		ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+	return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+	struct addr_sync_ctx sync_ctx;
+
+	sync_ctx.consumed = 0;
+	sync_ctx.addr = addr;
+	sync_ctx.ndev = ndev;
+	sync_ctx.flush = 1;
+
+	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+	if (sync_ctx.consumed == num)
+		cpsw_set_mc(ndev, addr, -1, 0);
+
+	return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+	struct addr_sync_ctx *sync_ctx = ctx;
+	struct netdev_hw_addr *ha;
+	int found = 0;
+
+	if (!vdev || !(vdev->flags & IFF_UP))
+		return 0;
+
+	/* vlan address is relevant if its sync_cnt != 0 */
+	netdev_for_each_mc_addr(ha, vdev) {
+		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+			found = ha->sync_cnt;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+
+	sync_ctx->consumed++;
+	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+	return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+	struct addr_sync_ctx sync_ctx;
+
+	sync_ctx.addr = addr;
+	sync_ctx.ndev = ndev;
+	sync_ctx.consumed = 0;
+
+	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+	if (sync_ctx.consumed < num)
+		cpsw_set_mc(ndev, addr, -1, 0);
+
 	return 0;
 }
 
@@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 	/* Restore allmulti on vlans if necessary */
 	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
 
-	__dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
+	/* add/remove mcast address either for real netdev or for vlan */
+	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+			       cpsw_del_mc_addr);
 }
 
 static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	struct net_device	*ndev = skb->dev;
 	int			ret = 0, port;
 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_priv	*priv;
 
 	if (cpsw->data.dual_emac) {
 		port = CPDMA_RX_SOURCE_PORT(status);
@@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
 		skb_put(skb, len);
 		if (status & CPDMA_RX_VLAN_ENCAP)
 			cpsw_rx_vlan_encap(skb);
-		cpts_rx_timestamp(cpsw->cpts, skb);
+		priv = netdev_priv(ndev);
+		if (priv->rx_ts_enabled)
+			cpts_rx_timestamp(cpsw->cpts, skb);
 		skb->protocol = eth_type_trans(skb, ndev);
 		netif_receive_skb(skb);
 		ndev->stats.rx_bytes += len;
@@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	slave_write(slave, tx_prio_map, tx_prio_rg);
 }
 
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+	struct cpsw_priv *priv = arg;
+
+	if (!vdev)
+		return 0;
+
+	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+	return 0;
+}
+
 /* restore resources after port reset */
 static void cpsw_restore(struct cpsw_priv *priv)
 {
+	/* restore vlan configurations */
+	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
 	/* restore MQPRIO offload */
 	for_each_slave(priv, cpsw_mqprio_resume, priv);
 
@@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
 	struct cpsw_common *cpsw = priv->cpsw;
 
 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
-	__dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
+	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
 	netif_tx_stop_all_queues(priv->ndev);
 	netif_carrier_off(priv->ndev);
 
@@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	}
 
 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-	    cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
 	q_idx = skb_get_queue_mapping(skb);
@@ -2047,13 +2175,13 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 
 #if IS_ENABLED(CONFIG_TI_CPTS)
 
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 {
+	struct cpsw_common *cpsw = priv->cpsw;
 	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
 	u32 ts_en, seq_id;
 
-	if (!cpts_is_tx_enabled(cpsw->cpts) &&
-	    !cpts_is_rx_enabled(cpsw->cpts)) {
+	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
 		slave_write(slave, 0, CPSW1_TS_CTL);
 		return;
 	}
@@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
 
-	if (cpts_is_tx_enabled(cpsw->cpts))
+	if (priv->tx_ts_enabled)
 		ts_en |= CPSW_V1_TS_TX_EN;
 
-	if (cpts_is_rx_enabled(cpsw->cpts))
+	if (priv->rx_ts_enabled)
 		ts_en |= CPSW_V1_TS_RX_EN;
 
 	slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 	case CPSW_VERSION_2:
 		ctrl &= ~CTRL_V2_ALL_TS_MASK;
 
-		if (cpts_is_tx_enabled(cpsw->cpts))
+		if (priv->tx_ts_enabled)
 			ctrl |= CTRL_V2_TX_TS_BITS;
 
-		if (cpts_is_rx_enabled(cpsw->cpts))
+		if (priv->rx_ts_enabled)
 			ctrl |= CTRL_V2_RX_TS_BITS;
 		break;
 	case CPSW_VERSION_3:
 	default:
 		ctrl &= ~CTRL_V3_ALL_TS_MASK;
 
-		if (cpts_is_tx_enabled(cpsw->cpts))
+		if (priv->tx_ts_enabled)
 			ctrl |= CTRL_V3_TX_TS_BITS;
 
-		if (cpts_is_rx_enabled(cpsw->cpts))
+		if (priv->rx_ts_enabled)
 			ctrl |= CTRL_V3_RX_TS_BITS;
 		break;
 	}
@@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
 	slave_write(slave, ctrl, CPSW2_CONTROL);
 	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
 }
 
 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	struct cpsw_priv *priv = netdev_priv(dev);
 	struct hwtstamp_config cfg;
 	struct cpsw_common *cpsw = priv->cpsw;
-	struct cpts *cpts = cpsw->cpts;
 
 	if (cpsw->version != CPSW_VERSION_1 &&
 	    cpsw->version != CPSW_VERSION_2 &&
@@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 
 	switch (cfg.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
-		cpts_rx_enable(cpts, 0);
+		priv->rx_ts_enabled = 0;
 		break;
 	case HWTSTAMP_FILTER_ALL:
 	case HWTSTAMP_FILTER_NTP_ALL:
@@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		break;
 	default:
 		return -ERANGE;
 	}
 
-	cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
 
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
-		cpsw_hwtstamp_v1(cpsw);
+		cpsw_hwtstamp_v1(priv);
 		break;
 	case CPSW_VERSION_2:
 	case CPSW_VERSION_3:
@@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 {
 	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
-	struct cpts *cpts = cpsw->cpts;
+	struct cpsw_priv *priv = netdev_priv(dev);
 	struct hwtstamp_config cfg;
 
 	if (cpsw->version != CPSW_VERSION_1 &&
@@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 		return -EOPNOTSUPP;
 
 	cfg.flags = 0;
-	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
-		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
-			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	cfg.rx_filter = priv->rx_ts_enabled;
 
 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
@@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 				  HOST_PORT_NUM, ALE_VLAN, vid);
 	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
 				  0, ALE_VLAN, vid);
+	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
 err:
 	pm_runtime_put(cpsw->dev);
 	return ret;
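The cpsw multicast rework above moves from __dev_mc_sync() to __hw_addr_ref_sync_dev(), whose callbacks also receive how many times the address is referenced on the list; that count, combined with the vlan_for_each() walk, is what lets cpsw decide whether an address is still needed by any VLAN before touching the ALE. A minimal sketch of the callback contract as the cpsw code relies on it (the my_* names are placeholders):

/* Sketch of the callback contract; not cpsw code. */
static int my_sync_addr(struct net_device *ndev, const u8 *addr, int ref)
{
	/* 'ref' is the number of references 'addr' currently has on
	 * ndev->mc; program the hardware filter and return 0 on success. */
	return 0;
}

static int my_unsync_addr(struct net_device *ndev, const u8 *addr, int ref)
{
	/* Drop the address from the hardware filter, unless something the
	 * driver still tracks (e.g. a VLAN interface) keeps using it. */
	return 0;
}

static void my_set_rx_mode(struct net_device *ndev)
{
	/* Walks ndev->mc and invokes the callbacks only for entries whose
	 * reference count changed since the previous invocation. */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, my_sync_addr, my_unsync_addr);
}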
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c..054f782 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
 	return removed ? 0 : -1;
 }
 
+static void cpts_purge_txq(struct cpts *cpts)
+{
+	struct cpts_skb_cb_data *skb_cb;
+	struct sk_buff *skb, *tmp;
+	int removed = 0;
+
+	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+		if (time_after(jiffies, skb_cb->tmo)) {
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+			++removed;
+		}
+	}
+
+	if (removed)
+		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
 static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
 {
 	struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
 
 		if (time_after(jiffies, skb_cb->tmo)) {
 			/* timeout any expired skbs over 1s */
-			dev_dbg(cpts->dev,
-				"expiring tx timestamp mtype %u seqid %04x\n",
-				mtype, seqid);
+			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
 			__skb_unlink(skb, &cpts->txq);
 			dev_consume_skb_any(skb);
 		}
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
 	spin_lock_irqsave(&cpts->lock, flags);
 	ts = ns_to_timespec64(timecounter_read(&cpts->tc));
 
-	if (!skb_queue_empty(&cpts->txq))
-		delay = CPTS_SKB_TX_WORK_TIMEOUT;
+	if (!skb_queue_empty(&cpts->txq)) {
+		cpts_purge_txq(cpts);
+		if (!skb_queue_empty(&cpts->txq))
+			delay = CPTS_SKB_TX_WORK_TIMEOUT;
+	}
 	spin_unlock_irqrestore(&cpts->lock, flags);
 
 	pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 	u64 ns;
 	struct skb_shared_hwtstamps *ssh;
 
-	if (!cpts->rx_enable)
-		return;
 	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
 	if (!ns)
 		return;
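
The new cpts_purge_txq() above, like the existing per-event expiry, relies on the jiffies time_after() idiom to drop skbs whose deadline has passed. A minimal kernel-context sketch of that idiom, shown only to make the expiry condition explicit (the helper name is illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

/* true once the current tick count has moved past the armed deadline */
static inline bool example_deadline_passed(unsigned long tmo)
{
	/* time_after() compares via signed subtraction, so the test stays
	 * correct even when the jiffies counter wraps around.
	 */
	return time_after(jiffies, tmo);
}

/* a deadline is typically armed as: tmo = jiffies + msecs_to_jiffies(timeout_ms); */
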
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73fa..d2c7dec 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
 			 struct device_node *node);
 void cpts_release(struct cpts *cpts);
 
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-	cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
-	return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-	cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
-	return !!cpts->tx_enable;
-}
-
 static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
 	unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
 {
 }
 
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
-	return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
-	return false;
-}
-
 static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
 	return false;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb..20d81e0 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
 
 	int                             cpts_registered;
 	struct cpts                     *cpts;
+	int				rx_ts_enabled;
+	int				tx_ts_enabled;
 };
 
 struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
 
 	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
-	    !cpts_is_tx_enabled(gbe_dev->cpts))
+	    !gbe_dev->tx_ts_enabled)
 		return 0;
 
 	/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
 		return 0;
 	}
 
-	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+	if (gbe_dev->rx_ts_enabled)
+		cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
 	p_info->rxtstamp_complete = true;
 
 	return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
 		return -EOPNOTSUPP;
 
 	cfg.flags = 0;
-	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
-		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
-			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+	cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	cfg.rx_filter = gbe_dev->rx_ts_enabled;
 
 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
 	struct gbe_slave *slave = gbe_intf->slave;
 	u32 ts_en, seq_id, ctl;
 
-	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
-	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
+	if (!gbe_dev->rx_ts_enabled &&
+	    !gbe_dev->tx_ts_enabled) {
 		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
 		return;
 	}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
 		(slave->ts_ctl.uni ?  TS_UNI_EN :
 			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
 
-	if (cpts_is_tx_enabled(gbe_dev->cpts))
+	if (gbe_dev->tx_ts_enabled)
 		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
 
-	if (cpts_is_rx_enabled(gbe_dev->cpts))
+	if (gbe_dev->rx_ts_enabled)
 		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
 
 	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
 
 	switch (cfg.tx_type) {
 	case HWTSTAMP_TX_OFF:
-		cpts_tx_enable(cpts, 0);
+		gbe_dev->tx_ts_enabled = 0;
 		break;
 	case HWTSTAMP_TX_ON:
-		cpts_tx_enable(cpts, 1);
+		gbe_dev->tx_ts_enabled = 1;
 		break;
 	default:
 		return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
 
 	switch (cfg.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
-		cpts_rx_enable(cpts, 0);
+		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 		break;
 	default:
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6a71c2c..c50a977 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev)
 
 static int tc_mii_probe(struct net_device *dev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 	struct tc35815_local *lp = netdev_priv(dev);
 	struct phy_device *phydev;
-	u32 dropmask;
 
 	phydev = phy_find_first(lp->mii_bus);
 	if (!phydev) {
@@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev)
 
 	/* mask with MAC supported features */
 	phy_set_max_speed(phydev, SPEED_100);
-	dropmask = 0;
-	if (options.speed == 10)
-		dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
-	else if (options.speed == 100)
-		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
-	if (options.duplex == 1)
-		dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
-	else if (options.duplex == 2)
-		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
-	phydev->supported &= ~dropmask;
-	phydev->advertising = phydev->supported;
+	if (options.speed == 10) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+	} else if (options.speed == 100) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+	}
+	if (options.duplex == 1) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+	} else if (options.duplex == 2) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+	}
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	lp->link = 0;
 	lp->speed = 0;
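
The tc35815 hunk above is one instance of the tree-wide move from the legacy SUPPORTED_*/ADVERTISED_* u32 masks to ethtool link-mode bitmaps. A minimal kernel-context sketch of the same restrict-then-advertise pattern, assuming a MAC driver with an attached phydev (the helper name and the chosen modes are illustrative, not from this patch):

#include <linux/linkmode.h>
#include <linux/phy.h>

static void example_limit_to_100_full(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* keep only the modes set in 'mask' */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);

	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
}
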
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a0cd1c4..58bbba8 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -70,6 +70,7 @@ struct geneve_dev {
 	bool		   collect_md;
 	bool		   use_udp6_rx_checksums;
 	bool		   ttl_inherit;
+	enum ifla_geneve_df df;
 };
 
 struct geneve_sock {
@@ -387,6 +388,59 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */
+static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+	struct genevehdr *geneveh;
+	struct geneve_sock *gs;
+	u8 zero_vni[3] = { 0 };
+	u8 *vni = zero_vni;
+
+	if (skb->len < GENEVE_BASE_HLEN)
+		return -EINVAL;
+
+	geneveh = geneve_hdr(skb);
+	if (geneveh->ver != GENEVE_VER)
+		return -EINVAL;
+
+	if (geneveh->proto_type != htons(ETH_P_TEB))
+		return -EINVAL;
+
+	gs = rcu_dereference_sk_user_data(sk);
+	if (!gs)
+		return -ENOENT;
+
+	if (geneve_get_sk_family(gs) == AF_INET) {
+		struct iphdr *iph = ip_hdr(skb);
+		__be32 addr4 = 0;
+
+		if (!gs->collect_md) {
+			vni = geneve_hdr(skb)->vni;
+			addr4 = iph->daddr;
+		}
+
+		return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT;
+	}
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (geneve_get_sk_family(gs) == AF_INET6) {
+		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+		struct in6_addr addr6;
+
+		memset(&addr6, 0, sizeof(struct in6_addr));
+
+		if (!gs->collect_md) {
+			vni = geneve_hdr(skb)->vni;
+			addr6 = ip6h->daddr;
+		}
+
+		return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT;
+	}
+#endif
+
+	return -EPFNOSUPPORT;
+}
+
 static struct socket *geneve_create_sock(struct net *net, bool ipv6,
 					 __be16 port, bool ipv6_rx_csum)
 {
@@ -544,6 +598,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
 	tunnel_cfg.gro_receive = geneve_gro_receive;
 	tunnel_cfg.gro_complete = geneve_gro_complete;
 	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+	tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup;
 	tunnel_cfg.encap_destroy = NULL;
 	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 	list_add(&gs->list, &gn->sock_list);
@@ -823,8 +878,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	struct rtable *rt;
 	struct flowi4 fl4;
 	__u8 tos, ttl;
+	__be16 df = 0;
 	__be16 sport;
-	__be16 df;
 	int err;
 
 	rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
@@ -838,6 +893,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	if (geneve->collect_md) {
 		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
 		ttl = key->ttl;
+
+		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 	} else {
 		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
 		if (geneve->ttl_inherit)
@@ -845,8 +902,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		else
 			ttl = key->ttl;
 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+		if (geneve->df == GENEVE_DF_SET) {
+			df = htons(IP_DF);
+		} else if (geneve->df == GENEVE_DF_INHERIT) {
+			struct ethhdr *eth = eth_hdr(skb);
+
+			if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+				df = htons(IP_DF);
+			} else if (ntohs(eth->h_proto) == ETH_P_IP) {
+				struct iphdr *iph = ip_hdr(skb);
+
+				if (iph->frag_off & htons(IP_DF))
+					df = htons(IP_DF);
+			}
+		}
 	}
-	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 
 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
 	if (unlikely(err))
@@ -1093,6 +1164,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
 	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
 	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
 	[IFLA_GENEVE_TTL_INHERIT]	= { .type = NLA_U8 },
+	[IFLA_GENEVE_DF]		= { .type = NLA_U8 },
 };
 
 static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1128,6 +1200,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
 		}
 	}
 
+	if (data[IFLA_GENEVE_DF]) {
+		enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
+		if (df < 0 || df > GENEVE_DF_MAX) {
+			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+					    "Invalid DF attribute");
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -1173,7 +1255,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 			    struct netlink_ext_ack *extack,
 			    const struct ip_tunnel_info *info,
 			    bool metadata, bool ipv6_rx_csum,
-			    bool ttl_inherit)
+			    bool ttl_inherit, enum ifla_geneve_df df)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1223,6 +1305,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 	geneve->collect_md = metadata;
 	geneve->use_udp6_rx_checksums = ipv6_rx_csum;
 	geneve->ttl_inherit = ttl_inherit;
+	geneve->df = df;
 
 	err = register_netdevice(dev);
 	if (err)
@@ -1242,7 +1325,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
 			  struct netlink_ext_ack *extack,
 			  struct ip_tunnel_info *info, bool *metadata,
 			  bool *use_udp6_rx_checksums, bool *ttl_inherit,
-			  bool changelink)
+			  enum ifla_geneve_df *df, bool changelink)
 {
 	int attrtype;
 
@@ -1330,6 +1413,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
 	if (data[IFLA_GENEVE_TOS])
 		info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
 
+	if (data[IFLA_GENEVE_DF])
+		*df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
 	if (data[IFLA_GENEVE_LABEL]) {
 		info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
 				  IPV6_FLOWLABEL_MASK;
@@ -1448,6 +1534,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
 			  struct nlattr *tb[], struct nlattr *data[],
 			  struct netlink_ext_ack *extack)
 {
+	enum ifla_geneve_df df = GENEVE_DF_UNSET;
 	bool use_udp6_rx_checksums = false;
 	struct ip_tunnel_info info;
 	bool ttl_inherit = false;
@@ -1456,12 +1543,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
 
 	init_tnl_info(&info, GENEVE_UDP_PORT);
 	err = geneve_nl2info(tb, data, extack, &info, &metadata,
-			     &use_udp6_rx_checksums, &ttl_inherit, false);
+			     &use_udp6_rx_checksums, &ttl_inherit, &df, false);
 	if (err)
 		return err;
 
 	err = geneve_configure(net, dev, extack, &info, metadata,
-			       use_udp6_rx_checksums, ttl_inherit);
+			       use_udp6_rx_checksums, ttl_inherit, df);
 	if (err)
 		return err;
 
@@ -1524,6 +1611,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
 	struct ip_tunnel_info info;
 	bool metadata;
 	bool use_udp6_rx_checksums;
+	enum ifla_geneve_df df;
 	bool ttl_inherit;
 	int err;
 
@@ -1539,7 +1627,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
 	use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
 	ttl_inherit = geneve->ttl_inherit;
 	err = geneve_nl2info(tb, data, extack, &info, &metadata,
-			     &use_udp6_rx_checksums, &ttl_inherit, true);
+			     &use_udp6_rx_checksums, &ttl_inherit, &df, true);
 	if (err)
 		return err;
 
@@ -1572,6 +1660,7 @@ static size_t geneve_get_size(const struct net_device *dev)
 		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_DF */
 		nla_total_size(sizeof(__be32)) +  /* IFLA_GENEVE_LABEL */
 		nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
 		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
@@ -1620,6 +1709,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
 		goto nla_put_failure;
 
+	if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
+		goto nla_put_failure;
+
 	if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
 		goto nla_put_failure;
 
@@ -1666,12 +1758,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 
 	memset(tb, 0, sizeof(tb));
 	dev = rtnl_create_link(net, name, name_assign_type,
-			       &geneve_link_ops, tb);
+			       &geneve_link_ops, tb, NULL);
 	if (IS_ERR(dev))
 		return dev;
 
 	init_tnl_info(&info, dst_port);
-	err = geneve_configure(net, dev, NULL, &info, true, true, false);
+	err = geneve_configure(net, dev, NULL, &info,
+			       true, true, false, GENEVE_DF_UNSET);
 	if (err) {
 		free_netdev(dev);
 		return ERR_PTR(err);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf36e7f..85936ed 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -605,9 +605,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 				     IEEE_8021Q_INFO);
 
 		vlan->value = 0;
-		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
-		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
-				VLAN_PRIO_SHIFT;
+		vlan->vlanid = skb_vlan_tag_get_id(skb);
+		vlan->cfi = skb_vlan_tag_get_cfi(skb);
+		vlan->pri = skb_vlan_tag_get_prio(skb);
 	}
 
 	if (skb_is_gso(skb)) {
@@ -781,7 +781,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 	}
 
 	if (vlan) {
-		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
+			(vlan->cfi ? VLAN_CFI_MASK : 0);
 
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       vlan_tci);
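
The netvsc hunks above start carrying the CFI/DEI bit in both directions by splitting and recombining the 802.1Q TCI. A standalone sketch of the TCI layout those helpers encode (mask values as defined by 802.1Q; illustrative code, not part of the driver):

/* 802.1Q tag control information: 3-bit PCP | 1-bit CFI/DEI | 12-bit VID */
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* bits 0..11: VLAN ID */
#define VLAN_CFI_MASK	0x1000	/* bit 12: CFI/DEI */
#define VLAN_PRIO_SHIFT	13
#define VLAN_PRIO_MASK	0xe000	/* bits 13..15: priority */

static uint16_t tci_compose(uint16_t vid, uint8_t prio, int cfi)
{
	return (uint16_t)((vid & VLAN_VID_MASK) |
			  ((uint16_t)prio << VLAN_PRIO_SHIFT) |
			  (cfi ? VLAN_CFI_MASK : 0));
}

int main(void)
{
	uint16_t tci = tci_compose(100, 5, 1);

	printf("vid=%u prio=%u cfi=%u\n",
	       tci & VLAN_VID_MASK,
	       (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT,
	       !!(tci & VLAN_CFI_MASK));
	return 0;
}
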
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 6fe5dc9..9d0504f 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { {
 	.name		= "AM79C874",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= am79c_config_init,
 	.ack_interrupt	= am79c_ack_interrupt,
 	.config_intr	= am79c_config_intr,
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 632472c..beb3309 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -25,15 +25,10 @@
 #define PHY_ID_AQR107	0x03a1b4e0
 #define PHY_ID_AQR405	0x03a1b4b0
 
-#define PHY_AQUANTIA_FEATURES	(SUPPORTED_10000baseT_Full | \
-				 SUPPORTED_1000baseT_Full | \
-				 SUPPORTED_100baseT_Full | \
-				 PHY_DEFAULT_FEATURES)
-
 static int aquantia_config_aneg(struct phy_device *phydev)
 {
-	phydev->supported = PHY_AQUANTIA_FEATURES;
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->supported, phy_10gbit_features);
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	return 0;
 }
@@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQ1202",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
@@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQ2104",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
@@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQR105",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
@@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQR106",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
@@ -164,7 +155,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQR107",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
@@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Aquantia AQR405",
 	.features	= PHY_10GBIT_FULL_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.aneg_done	= genphy_c45_aneg_done,
 	.config_aneg    = aquantia_config_aneg,
 	.config_intr	= aquantia_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e74a047..f9432d0 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = {
 	.suspend		= at803x_suspend,
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
-	.flags			= PHY_HAS_INTERRUPT,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -395,7 +394,6 @@ static struct phy_driver at803x_driver[] = {
 	.suspend		= at803x_suspend,
 	.resume			= at803x_resume,
 	.features		= PHY_BASIC_FEATURES,
-	.flags			= PHY_HAS_INTERRUPT,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = {
 	.suspend		= at803x_suspend,
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
-	.flags			= PHY_HAS_INTERRUPT,
 	.aneg_done		= at803x_aneg_done,
 	.ack_interrupt		= &at803x_ack_interrupt,
 	.config_intr		= &at803x_config_intr,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index d95bffd..a88dd14 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
 	int reg, err;
 
 	/* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
-	phydev->supported |= SUPPORTED_Pause;
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
 
 	reg = phy_read(phydev, MII_BCM63XX_IR);
 	if (reg < 0)
@@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.phy_id_mask	= 0xfffffc00,
 	.name		= "Broadcom BCM63XX (1)",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+	.flags		= PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.phy_id		= 0x002bdc00,
 	.phy_id_mask	= 0xfffffc00,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+	.flags		= PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b2b6307..712224c 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 
 static struct phy_driver bcm7xxx_driver[] = {
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+	BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
 	BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
 	BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
 	BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
@@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = {
 
 static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
 	{ PHY_ID_BCM7250, 0xfffffff0, },
+	{ PHY_ID_BCM7255, 0xfffffff0, },
 	{ PHY_ID_BCM7260, 0xfffffff0, },
 	{ PHY_ID_BCM7268, 0xfffffff0, },
 	{ PHY_ID_BCM7271, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f7ebdcf..1b35018 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
 
 static int bcm87xx_config_init(struct phy_device *phydev)
 {
-	phydev->supported = SUPPORTED_10000baseR_FEC;
-	phydev->advertising = ADVERTISED_10000baseR_FEC;
+	linkmode_zero(phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+			 phydev->supported);
+	linkmode_zero(phydev->advertising);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+			 phydev->advertising);
 	phydev->state = PHY_NOLINK;
 	phydev->autoneg = AUTONEG_DISABLE;
 
@@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = {
 	.phy_id		= PHY_ID_BCM8706,
 	.phy_id_mask	= 0xffffffff,
 	.name		= "Broadcom BCM8706",
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm87xx_config_init,
 	.config_aneg	= bcm87xx_config_aneg,
 	.read_status	= bcm87xx_read_status,
@@ -205,7 +208,6 @@ static struct phy_driver bcm87xx_driver[] = {
 	.phy_id		= PHY_ID_BCM8727,
 	.phy_id_mask	= 0xffffffff,
 	.name		= "Broadcom BCM8727",
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm87xx_config_init,
 	.config_aneg	= bcm87xx_config_aneg,
 	.read_status	= bcm87xx_read_status,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 7045370..aa73c5c 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5411",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5421",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM54210E",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5461",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM54612E",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM54616S",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm54616s_config_aneg,
 	.ack_interrupt	= bcm_phy_ack_intr,
@@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5464",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -666,7 +659,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5481",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
 	.ack_interrupt	= bcm_phy_ack_intr,
@@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask    = 0xfffffff0,
 	.name           = "Broadcom BCM54810",
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = bcm54xx_config_init,
 	.config_aneg    = bcm5481_config_aneg,
 	.ack_interrupt  = bcm_phy_ack_intr,
@@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5482",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm5482_config_init,
 	.read_status	= bcm5482_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
@@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM50610",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM50610M",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM57780",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCMAC131",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
@@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "Broadcom BCM5241",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
@@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.phy_id_mask    = 0xfffffff0,
 	.name           = "Broadcom BCM89610",
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = bcm54xx_config_init,
 	.ack_interrupt  = bcm_phy_ack_intr,
 	.config_intr    = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index c05af00..fea61c8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = {
 	.name		= "Cicada Cis8201",
 	.phy_id_mask	= 0x000ffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
@@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = {
 	.name		= "Cicada Cis8204",
 	.phy_id_mask	= 0x000fffc0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5ee99b3..9716200 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,7 +150,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.name		= "Davicom DM9161E",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
 	.ack_interrupt	= dm9161_ack_interrupt,
@@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.name		= "Davicom DM9161B/C",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
 	.ack_interrupt	= dm9161_ack_interrupt,
@@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.name		= "Davicom DM9161A",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
 	.ack_interrupt	= dm9161_ack_interrupt,
@@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.name		= "Davicom DM9131",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 } };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index edd4d44..18b41bc 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = {
 	.phy_id_mask	= 0xfffffff0,
 	.name		= "NatSemi DP83640",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
 	.soft_reset	= dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6e8a2a4..24c7f14 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = {
 		.phy_id_mask = 0xfffffff0,
 		.name = "TI DP83822",
 		.features = PHY_BASIC_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.config_init = dp83822_config_init,
 		.soft_reset = dp83822_phy_reset,
 		.get_wol = dp83822_get_wol,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6e8e423..a6b5590 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 		.phy_id_mask	= 0xfffffff0,			\
 		.name		= _name,			\
 		.features	= PHY_BASIC_FEATURES,		\
-		.flags		= PHY_HAS_INTERRUPT,		\
 								\
 		.soft_reset	= genphy_soft_reset,		\
 		.config_init	= _config_init,			\
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b3935778..da6a67d 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = {
 		.phy_id_mask	= 0xfffffff0,
 		.name		= "TI DP83867",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 
 		.config_init	= dp83867_config_init,
 		.soft_reset	= dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 78cad13..da13356 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = {
 		.phy_id_mask = 0xfffffff0,
 		.name = "TI DP83TC811",
 		.features = PHY_BASIC_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.config_init = dp83811_config_init,
 		.config_aneg = dp83811_config_aneg,
 		.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 67b2608..f7fb627 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -223,14 +223,23 @@ struct phy_device *fixed_phy_register(unsigned int irq,
 
 	switch (status->speed) {
 	case SPEED_1000:
-		phy->supported = PHY_1000BT_FEATURES;
-		break;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				 phy->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 phy->supported);
+		/* fall through */
 	case SPEED_100:
-		phy->supported = PHY_100BT_FEATURES;
-		break;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				 phy->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 phy->supported);
+		/* fall through */
 	case SPEED_10:
 	default:
-		phy->supported = PHY_10BT_FEATURES;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+				 phy->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+				 phy->supported);
 	}
 
 	ret = phy_device_register(phy);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 791587a..7d5938b 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -25,6 +25,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/property.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -36,14 +37,34 @@ MODULE_LICENSE("GPL");
 
 /* IP101A/G - IP1001 */
 #define IP10XX_SPEC_CTRL_STATUS		16	/* Spec. Control Register */
-#define IP1001_RXPHASE_SEL		(1<<0)	/* Add delay on RX_CLK */
-#define IP1001_TXPHASE_SEL		(1<<1)	/* Add delay on TX_CLK */
+#define IP1001_RXPHASE_SEL		BIT(0)	/* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL		BIT(1)	/* Add delay on TX_CLK */
 #define IP1001_SPEC_CTRL_STATUS_2	20	/* IP1001 Spec. Control Reg 2 */
 #define IP1001_APS_ON			11	/* IP1001 APS Mode  bit */
-#define IP101A_G_APS_ON			2	/* IP101A/G APS Mode bit */
+#define IP101A_G_APS_ON			BIT(1)	/* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS	0x11	/* Conf Info IRQ & Status Reg */
-#define	IP101A_G_IRQ_PIN_USED		(1<<15) /* INTR pin used */
-#define	IP101A_G_IRQ_DEFAULT		IP101A_G_IRQ_PIN_USED
+#define	IP101A_G_IRQ_PIN_USED		BIT(15) /* INTR pin used */
+#define IP101A_G_IRQ_ALL_MASK		BIT(11) /* IRQ's inactive */
+#define IP101A_G_IRQ_SPEED_CHANGE	BIT(2)
+#define IP101A_G_IRQ_DUPLEX_CHANGE	BIT(1)
+#define IP101A_G_IRQ_LINK_CHANGE	BIT(0)
+
+#define IP101G_DIGITAL_IO_SPEC_CTRL			0x1d
+#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32		BIT(2)
+
+/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin
+ * (pin number 21). The hardware default is RXER (receive error) mode. But it
+ * can be configured to interrupt mode manually.
+ */
+enum ip101gr_sel_intr32 {
+	IP101GR_SEL_INTR32_KEEP,
+	IP101GR_SEL_INTR32_INTR,
+	IP101GR_SEL_INTR32_RXER,
+};
+
+struct ip101a_g_phy_priv {
+	enum ip101gr_sel_intr32 sel_intr32;
+};
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -162,26 +183,6 @@ static int ip1001_config_init(struct phy_device *phydev)
 	return 0;
 }
 
-static int ip101a_g_config_init(struct phy_device *phydev)
-{
-	int c;
-
-	c = ip1xx_reset(phydev);
-	if (c < 0)
-		return c;
-
-	/* INTR pin used: speed/link/duplex will cause an interrupt */
-	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
-	if (c < 0)
-		return c;
-
-	/* Enable Auto Power Saving mode */
-	c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
-	c |= IP101A_G_APS_ON;
-
-	return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
-}
-
 static int ip175c_read_status(struct phy_device *phydev)
 {
 	if (phydev->mdio.addr == 4) /* WAN port */
@@ -201,6 +202,106 @@ static int ip175c_config_aneg(struct phy_device *phydev)
 	return 0;
 }
 
+static int ip101a_g_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct ip101a_g_phy_priv *priv;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* Both functions (RX error and interrupt status) are sharing the same
+	 * pin on the 32-pin IP101GR, so this is an exclusive choice.
+	 */
+	if (device_property_read_bool(dev, "icplus,select-rx-error") &&
+	    device_property_read_bool(dev, "icplus,select-interrupt")) {
+		dev_err(dev,
+			"RXER and INTR mode cannot be selected together\n");
+		return -EINVAL;
+	}
+
+	if (device_property_read_bool(dev, "icplus,select-rx-error"))
+		priv->sel_intr32 = IP101GR_SEL_INTR32_RXER;
+	else if (device_property_read_bool(dev, "icplus,select-interrupt"))
+		priv->sel_intr32 = IP101GR_SEL_INTR32_INTR;
+	else
+		priv->sel_intr32 = IP101GR_SEL_INTR32_KEEP;
+
+	phydev->priv = priv;
+
+	return 0;
+}
+
+static int ip101a_g_config_init(struct phy_device *phydev)
+{
+	struct ip101a_g_phy_priv *priv = phydev->priv;
+	int err, c;
+
+	c = ip1xx_reset(phydev);
+	if (c < 0)
+		return c;
+
+	/* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
+	switch (priv->sel_intr32) {
+	case IP101GR_SEL_INTR32_RXER:
+		err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+				 IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
+		if (err < 0)
+			return err;
+		break;
+
+	case IP101GR_SEL_INTR32_INTR:
+		err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+				 IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
+				 IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
+		if (err < 0)
+			return err;
+		break;
+
+	default:
+		/* Don't touch IP101G_DIGITAL_IO_SPEC_CTRL because it's not
+		 * documented on IP101A and it's not clear whether this would
+		 * cause problems.
+		 * For the 32-pin IP101GR we simply keep the SEL_INTR32
+		 * configuration as set by the bootloader when not configured
+		 * to one of the special functions.
+		 */
+		break;
+	}
+
+	/* Enable Auto Power Saving mode */
+	c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+	c |= IP101A_G_APS_ON;
+
+	return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+}
+
+static int ip101a_g_config_intr(struct phy_device *phydev)
+{
+	u16 val;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		/* INTR pin used: Speed/link/duplex will cause an interrupt */
+		val = IP101A_G_IRQ_PIN_USED;
+	else
+		val = IP101A_G_IRQ_ALL_MASK;
+
+	return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+}
+
+static int ip101a_g_did_interrupt(struct phy_device *phydev)
+{
+	int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+
+	if (val < 0)
+		return 0;
+
+	return val & (IP101A_G_IRQ_SPEED_CHANGE |
+		      IP101A_G_IRQ_DUPLEX_CHANGE |
+		      IP101A_G_IRQ_LINK_CHANGE);
+}
+
 static int ip101a_g_ack_interrupt(struct phy_device *phydev)
 {
 	int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
@@ -234,7 +335,9 @@ static struct phy_driver icplus_driver[] = {
 	.name		= "ICPlus IP101A/G",
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
+	.probe		= ip101a_g_probe,
+	.config_intr	= ip101a_g_config_intr,
+	.did_interrupt	= ip101a_g_did_interrupt,
 	.ack_interrupt	= ip101a_g_ack_interrupt,
 	.config_init	= &ip101a_g_config_init,
 	.suspend	= genphy_suspend,
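
ip101a_g_config_init() above uses phy_modify() to set or clear the SEL_INTR32 bit. As a reading aid, a minimal kernel-context sketch of what such a read-modify-write amounts to (illustrative only; the real phy_modify() additionally serializes the whole sequence under the MDIO bus lock rather than locking each access separately):

#include <linux/phy.h>

static int example_modify(struct phy_device *phydev, u32 reg, u16 mask, u16 set)
{
	int val = phy_read(phydev, reg);	/* read current register value */

	if (val < 0)
		return val;

	/* clear the masked bits, then apply the new ones */
	return phy_write(phydev, reg, (val & ~mask) | set);
}
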
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 7d936fb..fc0f502 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
@@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY22F (PEF 7061) v1.3",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
@@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
@@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY22F (PEF 7061) v1.4",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
@@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
@@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
@@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY11G (xRX v1.1 integrated)",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
@@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY22F (xRX v1.1 integrated)",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
@@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY11G (xRX v1.2 integrated)",
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
@@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = {
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Intel XWAY PHY22F (xRX v1.2 integrated)",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c14b254..c8bb29a 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
 			*/
 		} while (lpa == adv && retry--);
 
-		phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+		mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
 
 		lpa &= adv;
 
@@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
 			phydev->speed = SPEED_10;
 
 		phydev->pause = phydev->asym_pause = 0;
-		phydev->lp_advertising = 0;
+		linkmode_zero(phydev->lp_advertising);
 	}
 
 	return 0;
@@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.name		= "LXT970",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= lxt970_config_init,
 	.ack_interrupt	= lxt970_ack_interrupt,
 	.config_intr	= lxt970_config_intr,
@@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.name		= "LXT971",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.ack_interrupt	= lxt971_ack_interrupt,
 	.config_intr	= lxt971_config_intr,
 }, {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cbec296..6a98819 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
 }
 
 /**
- * ethtool_adv_to_fiber_adv_t
- * @ethadv: the ethtool advertisement settings
+ * linkmode_adv_to_fiber_adv_t
+ * @advertise: the linkmode advertisement settings
  *
- * A small helper function that translates ethtool advertisement
- * settings to phy autonegotiation advertisements for the
- * MII_ADV register for fiber link.
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the MII_ADV
+ * register for fiber link.
  */
-static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv)
+static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
 {
 	u32 result = 0;
 
-	if (ethadv & ADVERTISED_1000baseT_Half)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
 		result |= ADVERTISE_FIBER_1000HALF;
-	if (ethadv & ADVERTISED_1000baseT_Full)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
 		result |= ADVERTISE_FIBER_1000FULL;
 
-	if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP))
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
 		result |= LPA_PAUSE_ASYM_FIBER;
-	else if (ethadv & ADVERTISE_PAUSE_CAP)
+	else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
 		result |= (ADVERTISE_PAUSE_FIBER
 			   & (~ADVERTISE_PAUSE_ASYM_FIBER));
 
@@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
 	int changed = 0;
 	int err;
 	int adv, oldadv;
-	u32 advertise;
 
 	if (phydev->autoneg != AUTONEG_ENABLE)
 		return genphy_setup_forced(phydev);
 
 	/* Only allow advertising what this PHY supports */
-	phydev->advertising &= phydev->supported;
-	advertise = phydev->advertising;
+	linkmode_and(phydev->advertising, phydev->advertising,
+		     phydev->supported);
 
 	/* Setup fiber advertisement */
 	adv = phy_read(phydev, MII_ADVERTISE);
@@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
 	oldadv = adv;
 	adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
 		| LPA_PAUSE_FIBER);
-	adv |= ethtool_adv_to_fiber_adv_t(advertise);
+	adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
 
 	if (adv != oldadv) {
 		err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
 	/* SGMII-to-Copper mode initialization */
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-		u32 pause;
 
 		/* Select page 18 */
 		err = marvell_set_page(phydev, 18);
@@ -878,9 +877,14 @@ static int m88e1510_config_init(struct phy_device *phydev)
 		 * This means we can never be truly sure what was advertised,
 		 * so disable Pause support.
 		 */
-		pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-		phydev->supported &= ~pause;
-		phydev->advertising &= ~pause;
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				   phydev->supported);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				   phydev->supported);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				   phydev->advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				   phydev->advertising);
 	}
 
 	return m88e1318_config_init(phydev);
@@ -1043,22 +1047,21 @@ static int m88e1145_config_init(struct phy_device *phydev)
 }
 
 /**
- * fiber_lpa_to_ethtool_lpa_t
+ * fiber_lpa_to_linkmode_lpa_t
+ * @advertising: the linkmode advertisement settings
  * @lpa: value of the MII_LPA register for fiber link
  *
  * A small helper function that translates MII_LPA
- * bits to ethtool LP advertisement settings.
+ * bits to linkmode LP advertisement settings.
  */
-static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa)
+static void fiber_lpa_to_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
 {
-	u32 result = 0;
-
 	if (lpa & LPA_FIBER_1000HALF)
-		result |= ADVERTISED_1000baseT_Half;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				 advertising);
 	if (lpa & LPA_FIBER_1000FULL)
-		result |= ADVERTISED_1000baseT_Full;
-
-	return result;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 advertising);
 }
 
 /**
@@ -1134,9 +1137,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
 	}
 
 	if (!fiber) {
-		phydev->lp_advertising =
-			mii_stat1000_to_ethtool_lpa_t(lpagb) |
-			mii_lpa_to_ethtool_lpa_t(lpa);
+		mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
+		mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, lpagb);
 
 		if (phydev->duplex == DUPLEX_FULL) {
 			phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
@@ -1144,7 +1146,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
 		}
 	} else {
 		/* The fiber link is only 1000M capable */
-		phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+		fiber_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
 
 		if (phydev->duplex == DUPLEX_FULL) {
 			if (!(lpa & LPA_PAUSE_FIBER)) {
@@ -1183,7 +1185,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
 
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
-	phydev->lp_advertising = 0;
+	linkmode_zero(phydev->lp_advertising);
 
 	return 0;
 }
@@ -1235,7 +1237,8 @@ static int marvell_read_status(struct phy_device *phydev)
 	int err;
 
 	/* Check the fiber mode first */
-	if (phydev->supported & SUPPORTED_FIBRE &&
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			      phydev->supported) &&
 	    phydev->interface != PHY_INTERFACE_MODE_SGMII) {
 		err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 		if (err < 0)
@@ -1278,7 +1281,8 @@ static int marvell_suspend(struct phy_device *phydev)
 	int err;
 
 	/* Suspend the fiber mode first */
-	if (!(phydev->supported & SUPPORTED_FIBRE)) {
+	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			       phydev->supported)) {
 		err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 		if (err < 0)
 			goto error;
@@ -1312,7 +1316,8 @@ static int marvell_resume(struct phy_device *phydev)
 	int err;
 
 	/* Resume the fiber mode first */
-	if (!(phydev->supported & SUPPORTED_FIBRE)) {
+	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			       phydev->supported)) {
 		err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 		if (err < 0)
 			goto error;
@@ -1463,7 +1468,8 @@ static int m88e1318_set_wol(struct phy_device *phydev,
 
 static int marvell_get_sset_count(struct phy_device *phydev)
 {
-	if (phydev->supported & SUPPORTED_FIBRE)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			      phydev->supported))
 		return ARRAY_SIZE(marvell_hw_stats);
 	else
 		return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
@@ -2005,7 +2011,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1101",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1101_config_aneg,
@@ -2024,7 +2029,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1112",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
@@ -2043,7 +2047,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1111",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
@@ -2063,7 +2066,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1118",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1118_config_init,
 		.config_aneg = &m88e1118_config_aneg,
@@ -2082,7 +2084,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1121R",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1121_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1121_config_aneg,
@@ -2103,7 +2104,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1318S",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1318_config_init,
 		.config_aneg = &m88e1318_config_aneg,
@@ -2126,7 +2126,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1145",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1145_config_init,
 		.config_aneg = &m88e1101_config_aneg,
@@ -2146,7 +2145,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1149R",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1149_config_init,
 		.config_aneg = &m88e1118_config_aneg,
@@ -2165,7 +2163,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1240",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
@@ -2184,7 +2181,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1116R",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1116r_config_init,
 		.ack_interrupt = &marvell_ack_interrupt,
@@ -2202,7 +2198,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1510",
 		.features = PHY_GBIT_FIBRE_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1510_probe,
 		.config_init = &m88e1510_config_init,
 		.config_aneg = &m88e1510_config_aneg,
@@ -2226,7 +2221,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1540",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e1510_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
@@ -2248,7 +2242,6 @@ static struct phy_driver marvell_drivers[] = {
 		.name = "Marvell 88E1545",
 		.probe = m88e1510_probe,
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2268,7 +2261,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E3016",
 		.features = PHY_BASIC_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e3016_config_init,
 		.aneg_done = &marvell_aneg_done,
@@ -2289,7 +2281,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E6390",
 		.features = PHY_GBIT_FEATURES,
-		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e6390_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
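
The flag removals in this driver rely on the PHY core inferring interrupt capability from the presence of the ack_interrupt and config_intr callbacks instead of PHY_HAS_INTERRUPT (see the phy_drv_supports_irq() hunk in phy_device.c further down). A minimal userspace sketch of that inference, using a hypothetical fake_phy_driver stand-in rather than the real struct phy_driver:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the two callbacks the core now checks;
 * not the real struct phy_driver. */
struct fake_phy_driver {
	const char *name;
	int (*ack_interrupt)(void *phydev);
	int (*config_intr)(void *phydev);
};

/* Mirrors the idea behind phy_drv_supports_irq(): a driver handles
 * interrupts iff it provides both callbacks, so no flag is needed. */
static bool drv_supports_irq(const struct fake_phy_driver *drv)
{
	return drv->ack_interrupt && drv->config_intr;
}

static int dummy_cb(void *phydev) { (void)phydev; return 0; }

int main(void)
{
	struct fake_phy_driver with_irq = {
		.name = "with-irq",
		.ack_interrupt = dummy_cb,
		.config_intr = dummy_cb,
	};
	struct fake_phy_driver poll_only = { .name = "poll-only" };

	printf("%s -> %s\n", with_irq.name,
	       drv_supports_irq(&with_irq) ? "irq" : "poll");
	printf("%s -> %s\n", poll_only.name,
	       drv_supports_irq(&poll_only) ? "irq" : "poll");
	return 0;
}
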
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1c9d039..6f6e886 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev)
 static int mv3310_config_init(struct phy_device *phydev)
 {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
-	u32 mask;
 	int val;
 
 	/* Check that the PHY interface type is compatible */
@@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev)
 		}
 	}
 
-	if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
-		phydev_warn(phydev,
-			    "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
-			    __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
-
-	phydev->supported &= mask;
-	phydev->advertising &= phydev->supported;
+	linkmode_copy(phydev->supported, supported);
+	linkmode_and(phydev->advertising, phydev->advertising,
+		     phydev->supported);
 
 	return 0;
 }
@@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev)
 static int mv3310_config_aneg(struct phy_device *phydev)
 {
 	bool changed = false;
-	u32 advertising;
+	u16 reg;
 	int ret;
 
 	/* We don't support manual MDI control */
@@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev)
 		return genphy_c45_an_disable_aneg(phydev);
 	}
 
-	phydev->advertising &= phydev->supported;
-	advertising = phydev->advertising;
+	linkmode_and(phydev->advertising, phydev->advertising,
+		     phydev->supported);
 
 	ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
 			    ADVERTISE_ALL | ADVERTISE_100BASE4 |
 			    ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
-			    ethtool_adv_to_mii_adv_t(advertising));
+			    linkmode_adv_to_mii_adv_t(phydev->advertising));
 	if (ret < 0)
 		return ret;
 	if (ret > 0)
 		changed = true;
 
+	reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
 	ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
-			    ADVERTISE_1000FULL | ADVERTISE_1000HALF,
-			    ethtool_adv_to_mii_ctrl1000_t(advertising));
+			    ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
 	if (ret < 0)
 		return ret;
 	if (ret > 0)
 		changed = true;
 
 	/* 10G control register */
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+			      phydev->advertising))
+		reg = MDIO_AN_10GBT_CTRL_ADV10G;
+	else
+		reg = 0;
+
 	ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
-			    MDIO_AN_10GBT_CTRL_ADV10G,
-			    advertising & ADVERTISED_10000baseT_Full ?
-				MDIO_AN_10GBT_CTRL_ADV10G : 0);
+			    MDIO_AN_10GBT_CTRL_ADV10G, reg);
 	if (ret < 0)
 		return ret;
 	if (ret > 0)
@@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 
 	phydev->speed = SPEED_UNKNOWN;
 	phydev->duplex = DUPLEX_UNKNOWN;
-	phydev->lp_advertising = 0;
+	linkmode_zero(phydev->lp_advertising);
 	phydev->link = 0;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
@@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 		if (val < 0)
 			return val;
 
-		phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
+		mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, val);
 
 		if (phydev->autoneg == AUTONEG_ENABLE)
 			phy_resolve_aneg_linkmode(phydev);
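
The mv3310 changes above swap u32 advertising masks for the kernel's multi-word link-mode bitmaps (linkmode_copy, linkmode_and, linkmode_test_bit), which is what allows modes beyond bit 31 to be represented at all. A simplified, self-contained model of that idea in plain C; the bitmap helpers and bit numbers here are illustrative stand-ins, not the real ETHTOOL_LINK_MODE_* values:

#include <stdbool.h>
#include <stdio.h>

#define NBITS  96                      /* > 32, so a single u32 can no longer hold it */
#define NWORDS ((NBITS + 31) / 32)

typedef unsigned int linkmask[NWORDS]; /* stand-in for the kernel's link-mode bitmap */

static void mask_set(linkmask m, int bit)        { m[bit / 32] |= 1u << (bit % 32); }
static bool mask_test(const linkmask m, int bit) { return m[bit / 32] & (1u << (bit % 32)); }

static void mask_and(linkmask dst, const linkmask a, const linkmask b)
{
	for (int i = 0; i < NWORDS; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	linkmask supported = { 0 }, advertising = { 0 }, common;

	mask_set(supported, 5);    /* illustrative numbering only */
	mask_set(supported, 75);   /* a mode that would not fit in a u32 */
	mask_set(advertising, 75);

	/* Same shape as: linkmode_and(advertising, advertising, supported) */
	mask_and(common, advertising, supported);

	printf("bit 75 advertised and supported: %s\n",
	       mask_test(common, 75) ? "yes" : "no");
	return 0;
}
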
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index ddc2c5e..b03bcf2 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = {
 		.phy_id_mask	= 0xfffffff0,
 		.name		= "Meson GXL Internal PHY",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
+		.flags		= PHY_IS_INTERNAL,
 		.config_init	= meson_gxl_config_init,
 		.aneg_done      = genphy_aneg_done,
 		.read_status	= meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9265dea..c333847 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev)
 
 static int ksz8041_config_init(struct phy_device *phydev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
 	struct device_node *of_node = phydev->mdio.dev.of_node;
 
 	/* Limit supported and advertised modes in fiber mode */
 	if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
 		phydev->dev_flags |= MICREL_PHY_FXEN;
-		phydev->supported &= SUPPORTED_100baseT_Full |
-				     SUPPORTED_100baseT_Half;
-		phydev->supported |= SUPPORTED_FIBRE;
-		phydev->advertising &= ADVERTISED_100baseT_Full |
-				       ADVERTISED_100baseT_Half;
-		phydev->advertising |= ADVERTISED_FIBRE;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+
+		linkmode_and(phydev->supported, phydev->supported, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+				 phydev->supported);
+		linkmode_and(phydev->advertising, phydev->advertising, mask);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+				 phydev->advertising);
 		phydev->autoneg = AUTONEG_DISABLE;
 	}
 
@@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KS8737",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ks8737_type,
 	.config_init	= kszphy_config_init,
 	.ack_interrupt	= kszphy_ack_interrupt,
@@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= 0x00ffffff,
 	.name		= "Micrel KSZ8021 or KSZ8031",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= 0x00ffffff,
 	.name		= "Micrel KSZ8031",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8041",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz8041_config_init,
@@ -979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8041RNLI",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8051",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8051_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Micrel KSZ8001 or KS8721",
 	.phy_id_mask	= 0x00fffffc,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Micrel KSZ8081 or KSZ8091",
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8081_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
@@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Micrel KSZ8061",
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
@@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= 0x000ffffe,
 	.name		= "Micrel KSZ9021 Gigabit PHY",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
@@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ9031 Gigabit PHY",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
@@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Microchip KSZ9131 Gigabit PHY",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9131_config_init,
@@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ886X Switch",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
@@ -1124,7 +1116,6 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8795",
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 04b12e3..7557beb 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = {
 	.name		= "Microchip LAN88xx",
 
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= lan88xx_probe,
 	.remove		= lan88xx_remove,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c600a85..3d09b47 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
 		.name           = "Microchip LAN87xx T1",
 
 		.features       = PHY_BASIC_T1_FEATURES,
-		.flags          = PHY_HAS_INTERRUPT,
 
 		.config_init    = genphy_config_init,
 		.config_aneg    = genphy_config_aneg,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7cae175..cfe680f 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1829,7 +1829,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi FE VSC8530",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init	= &vsc85xx_config_init,
 	.config_aneg    = &vsc85xx_config_aneg,
@@ -1855,7 +1854,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi VSC8531",
 	.phy_id_mask    = 0xfffffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init    = &vsc85xx_config_init,
 	.config_aneg    = &vsc85xx_config_aneg,
@@ -1881,7 +1879,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi FE VSC8540 SyncE",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init	= &vsc85xx_config_init,
 	.config_aneg	= &vsc85xx_config_aneg,
@@ -1907,7 +1904,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi VSC8541 SyncE",
 	.phy_id_mask    = 0xfffffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init    = &vsc85xx_config_init,
 	.config_aneg    = &vsc85xx_config_aneg,
@@ -1933,7 +1929,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi GE VSC8574 SyncE",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init    = &vsc8584_config_init,
 	.config_aneg    = &vsc85xx_config_aneg,
@@ -1960,7 +1955,6 @@ static struct phy_driver vsc85xx_driver[] = {
 	.name		= "Microsemi GE VSC8584 SyncE",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.soft_reset	= &genphy_soft_reset,
 	.config_init    = &vsc8584_config_init,
 	.config_aneg    = &vsc85xx_config_aneg,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2b1e336..139bed2 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { {
 	.phy_id_mask = 0xfffffff0,
 	.name = "NatSemi DP83865",
 	.features = PHY_GBIT_FEATURES,
-	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ns_config_init,
 	.ack_interrupt = ns_ack_interrupt,
 	.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index d7636ff..03af927 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
 	if (val < 0)
 		return val;
 
-	phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val);
+	mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val);
 	phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
 	phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
 
@@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
 		return val;
 
 	if (val & MDIO_AN_10GBT_STAT_LP10G)
-		phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+				 phydev->lp_advertising);
 
 	return 0;
 }
@@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
 int gen10g_config_init(struct phy_device *phydev)
 {
 	/* Temporarily just say we support everything */
-	phydev->supported = SUPPORTED_10000baseT_Full;
-	phydev->advertising = SUPPORTED_10000baseT_Full;
+	linkmode_zero(phydev->supported);
+
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+			 phydev->supported);
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	return 0;
 }
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index c7da4cb..20fbd5e 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
  * must be grouped by speed and sorted in descending match priority
  * - iow, descending speed. */
 static const struct phy_setting settings[] = {
+	/* 100G */
+	{
+		.speed = SPEED_100000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_100000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_100000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+	},
+	{
+		.speed = SPEED_100000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+	},
+	/* 56G */
+	{
+		.speed = SPEED_56000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_56000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_56000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_56000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+	},
+	/* 50G */
+	{
+		.speed = SPEED_50000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+	},
+	{
+		.speed = SPEED_50000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+	},
+	{
+		.speed = SPEED_50000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+	},
+	/* 40G */
+	{
+		.speed = SPEED_40000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_40000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_40000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+	},
+	{
+		.speed = SPEED_40000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+	},
+	/* 25G */
+	{
+		.speed = SPEED_25000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+	},
+	{
+		.speed = SPEED_25000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+	},
+	{
+		.speed = SPEED_25000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+	},
+
+	/* 20G */
+	{
+		.speed = SPEED_20000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+	},
+	{
+		.speed = SPEED_20000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+	},
+	/* 10G */
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+	},
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+	},
 	{
 		.speed = SPEED_10000,
 		.duplex = DUPLEX_FULL,
@@ -75,13 +193,47 @@ static const struct phy_setting settings[] = {
 	{
 		.speed = SPEED_10000,
 		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+	},
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+	},
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+	},
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+	},
+	{
+		.speed = SPEED_10000,
+		.duplex = DUPLEX_FULL,
 		.bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 	},
+	/* 5G */
+	{
+		.speed = SPEED_5000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+	},
+
+	/* 2.5G */
+	{
+		.speed = SPEED_2500,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+	},
 	{
 		.speed = SPEED_2500,
 		.duplex = DUPLEX_FULL,
 		.bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
 	},
+	/* 1G */
 	{
 		.speed = SPEED_1000,
 		.duplex = DUPLEX_FULL,
@@ -90,11 +242,6 @@ static const struct phy_setting settings[] = {
 	{
 		.speed = SPEED_1000,
 		.duplex = DUPLEX_FULL,
-		.bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
-	},
-	{
-		.speed = SPEED_1000,
-		.duplex = DUPLEX_FULL,
 		.bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
 	},
 	{
@@ -103,6 +250,12 @@ static const struct phy_setting settings[] = {
 		.bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
 	},
 	{
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_FULL,
+		.bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+	},
+	/* 100M */
+	{
 		.speed = SPEED_100,
 		.duplex = DUPLEX_FULL,
 		.bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -112,6 +265,7 @@ static const struct phy_setting settings[] = {
 		.duplex = DUPLEX_HALF,
 		.bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
 	},
+	/* 10M */
 	{
 		.speed = SPEED_10,
 		.duplex = DUPLEX_FULL,
@@ -129,7 +283,6 @@ static const struct phy_setting settings[] = {
  * @speed: speed to match
  * @duplex: duplex to match
  * @mask: allowed link modes
- * @maxbit: bit size of link modes
  * @exact: an exact match is required
  *
  * Search the settings array for a setting that matches the speed and
@@ -143,14 +296,14 @@ static const struct phy_setting settings[] = {
  * they all fail, %NULL will be returned.
  */
 const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
-		   size_t maxbit, bool exact)
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
 {
 	const struct phy_setting *p, *match = NULL, *last = NULL;
 	int i;
 
 	for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
-		if (p->bit < maxbit && test_bit(p->bit, mask)) {
+		if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
+		    test_bit(p->bit, mask)) {
 			last = p;
 			if (p->speed == speed && p->duplex == duplex) {
 				/* Exact match for speed and duplex */
@@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
 EXPORT_SYMBOL_GPL(phy_lookup_setting);
 
 size_t phy_speeds(unsigned int *speeds, size_t size,
-		  unsigned long *mask, size_t maxbit)
+		  unsigned long *mask)
 {
 	size_t count;
 	int i;
 
 	for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
-		if (settings[i].bit < maxbit &&
+		if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
 		    test_bit(settings[i].bit, mask) &&
 		    (count == 0 || speeds[count - 1] != settings[i].speed))
 			speeds[count++] = settings[i].speed;
@@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
  */
 void phy_resolve_aneg_linkmode(struct phy_device *phydev)
 {
-	u32 common = phydev->lp_advertising & phydev->advertising;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
 
-	if (common & ADVERTISED_10000baseT_Full) {
+	linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) {
 		phydev->speed = SPEED_10000;
 		phydev->duplex = DUPLEX_FULL;
-	} else if (common & ADVERTISED_1000baseT_Full) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+				     common)) {
+		phydev->speed = SPEED_5000;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				     common)) {
+		phydev->speed = SPEED_2500;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				     common)) {
 		phydev->speed = SPEED_1000;
 		phydev->duplex = DUPLEX_FULL;
-	} else if (common & ADVERTISED_1000baseT_Half) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				     common)) {
 		phydev->speed = SPEED_1000;
 		phydev->duplex = DUPLEX_HALF;
-	} else if (common & ADVERTISED_100baseT_Full) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				     common)) {
 		phydev->speed = SPEED_100;
 		phydev->duplex = DUPLEX_FULL;
-	} else if (common & ADVERTISED_100baseT_Half) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				     common)) {
 		phydev->speed = SPEED_100;
 		phydev->duplex = DUPLEX_HALF;
-	} else if (common & ADVERTISED_10baseT_Full) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+				     common)) {
 		phydev->speed = SPEED_10;
 		phydev->duplex = DUPLEX_FULL;
-	} else if (common & ADVERTISED_10baseT_Half) {
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+				     common)) {
 		phydev->speed = SPEED_10;
 		phydev->duplex = DUPLEX_HALF;
 	}
 
 	if (phydev->duplex == DUPLEX_FULL) {
-		phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
-		phydev->asym_pause = !!(phydev->lp_advertising &
-					ADVERTISED_Asym_Pause);
+		phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+						  phydev->lp_advertising);
+		phydev->asym_pause = linkmode_test_bit(
+			ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			phydev->lp_advertising);
 	}
 }
 EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
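
phy_resolve_aneg_linkmode() above keeps the long-standing resolution rule: intersect the local and link-partner advertisements, then pick the fastest common speed/duplex, walking from 10G down to 10M half. A small standalone sketch of that selection over a made-up mode table (the bit numbering is hypothetical; only the fastest-first priority order mirrors the kernel's settings[] table):

#include <stdio.h>

struct mode { int speed; int full_duplex; unsigned int bit; };

/* Illustrative subset, sorted fastest first like the kernel's settings[]. */
static const struct mode modes[] = {
	{ 10000, 1, 0 },
	{  1000, 1, 1 },
	{  1000, 0, 2 },
	{   100, 1, 3 },
	{   100, 0, 4 },
	{    10, 1, 5 },
	{    10, 0, 6 },
};

int main(void)
{
	unsigned int advertising    = (1u << 1) | (1u << 3) | (1u << 4);
	unsigned int lp_advertising = (1u << 3) | (1u << 4) | (1u << 5);
	unsigned int common = advertising & lp_advertising;

	for (unsigned int i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (common & (1u << modes[i].bit)) {
			printf("resolved: %d Mb/s, %s duplex\n", modes[i].speed,
			       modes[i].full_duplex ? "full" : "half");
			return 0;
		}
	}
	printf("no common mode\n");
	return 0;
}
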
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d73ac3..376a0d8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st)
 {
 	switch (st) {
 	PHY_STATE_STR(DOWN)
-	PHY_STATE_STR(STARTING)
 	PHY_STATE_STR(READY)
-	PHY_STATE_STR(PENDING)
 	PHY_STATE_STR(UP)
-	PHY_STATE_STR(AN)
 	PHY_STATE_STR(RUNNING)
 	PHY_STATE_STR(NOLINK)
 	PHY_STATE_STR(FORCING)
@@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st)
 	return NULL;
 }
 
+static void phy_link_up(struct phy_device *phydev)
+{
+	phydev->phy_link_change(phydev, true, true);
+	phy_led_trigger_change_speed(phydev);
+}
+
+static void phy_link_down(struct phy_device *phydev, bool do_carrier)
+{
+	phydev->phy_link_change(phydev, false, do_carrier);
+	phy_led_trigger_change_speed(phydev);
+}
 
 /**
  * phy_print_status - Convenience function to print out the current phy status
@@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev)
  *
  * Returns 0 on success or < 0 on error.
  */
-static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, bool interrupts)
 {
-	phydev->interrupts = interrupts;
+	phydev->interrupts = interrupts ? 1 : 0;
 	if (phydev->drv->config_intr)
 		return phydev->drv->config_intr(phydev);
 
@@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done);
  * settings were found.
  */
 static const struct phy_setting *
-phy_find_valid(int speed, int duplex, u32 supported)
+phy_find_valid(int speed, int duplex, unsigned long *supported)
 {
-	unsigned long mask = supported;
-
-	return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
+	return phy_lookup_setting(speed, duplex, supported, false);
 }
 
 /**
@@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
 				  unsigned int *speeds,
 				  unsigned int size)
 {
-	unsigned long supported = phy->supported;
-
-	return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
+	return phy_speeds(speeds, size, phy->supported);
 }
 
 /**
@@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
  *
  * Description: Returns true if there is a valid setting, false otherwise.
  */
-static inline bool phy_check_valid(int speed, int duplex, u32 features)
+static inline bool phy_check_valid(int speed, int duplex,
+				   unsigned long *features)
 {
-	unsigned long mask = features;
-
-	return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
+	return !!phy_lookup_setting(speed, duplex, features, true);
 }
 
 /**
@@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features)
 static void phy_sanitize_settings(struct phy_device *phydev)
 {
 	const struct phy_setting *setting;
-	u32 features = phydev->supported;
 
 	/* Sanitize settings based on PHY capabilities */
-	if ((features & SUPPORTED_Autoneg) == 0)
+	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
 		phydev->autoneg = AUTONEG_DISABLE;
 
-	setting = phy_find_valid(phydev->speed, phydev->duplex, features);
+	setting = phy_find_valid(phydev->speed, phydev->duplex,
+				 phydev->supported);
 	if (setting) {
 		phydev->speed = setting->speed;
 		phydev->duplex = setting->duplex;
@@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev)
  */
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
 	u32 speed = ethtool_cmd_speed(cmd);
 
 	if (cmd->phy_address != phydev->mdio.addr)
 		return -EINVAL;
 
 	/* We make sure that we don't pass unsupported values in to the PHY */
-	cmd->advertising &= phydev->supported;
+	ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
+	linkmode_and(advertising, advertising, phydev->supported);
 
 	/* Verify the settings we care about. */
 	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
@@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 
 	phydev->speed = speed;
 
-	phydev->advertising = cmd->advertising;
+	linkmode_copy(phydev->advertising, advertising);
 
 	if (AUTONEG_ENABLE == cmd->autoneg)
-		phydev->advertising |= ADVERTISED_Autoneg;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 phydev->advertising);
 	else
-		phydev->advertising &= ~ADVERTISED_Autoneg;
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				   phydev->advertising);
 
 	phydev->duplex = cmd->duplex;
 
@@ -304,25 +311,24 @@ EXPORT_SYMBOL(phy_ethtool_sset);
 int phy_ethtool_ksettings_set(struct phy_device *phydev,
 			      const struct ethtool_link_ksettings *cmd)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
 	u8 autoneg = cmd->base.autoneg;
 	u8 duplex = cmd->base.duplex;
 	u32 speed = cmd->base.speed;
-	u32 advertising;
 
 	if (cmd->base.phy_address != phydev->mdio.addr)
 		return -EINVAL;
 
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
+	linkmode_copy(advertising, cmd->link_modes.advertising);
 
 	/* We make sure that we don't pass unsupported values in to the PHY */
-	advertising &= phydev->supported;
+	linkmode_and(advertising, advertising, phydev->supported);
 
 	/* Verify the settings we care about. */
 	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
 		return -EINVAL;
 
-	if (autoneg == AUTONEG_ENABLE && advertising == 0)
+	if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
 		return -EINVAL;
 
 	if (autoneg == AUTONEG_DISABLE &&
@@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
 
 	phydev->speed = speed;
 
-	phydev->advertising = advertising;
+	linkmode_copy(phydev->advertising, advertising);
 
 	if (autoneg == AUTONEG_ENABLE)
-		phydev->advertising |= ADVERTISED_Autoneg;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 phydev->advertising);
 	else
-		phydev->advertising &= ~ADVERTISED_Autoneg;
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				   phydev->advertising);
 
 	phydev->duplex = duplex;
 
@@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
 void phy_ethtool_ksettings_get(struct phy_device *phydev,
 			       struct ethtool_link_ksettings *cmd)
 {
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						phydev->supported);
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						phydev->advertising);
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
-						phydev->lp_advertising);
+	linkmode_copy(cmd->link_modes.supported, phydev->supported);
+	linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+	linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
 
 	cmd->base.speed = phydev->speed;
 	cmd->base.duplex = phydev->duplex;
@@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 				}
 				break;
 			case MII_ADVERTISE:
-				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+				mii_adv_to_linkmode_adv_t(phydev->advertising,
+							  val);
 				change_autoneg = true;
 				break;
 			default:
@@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 }
 EXPORT_SYMBOL(phy_mii_ioctl);
 
+static void phy_queue_state_machine(struct phy_device *phydev,
+				    unsigned int secs)
+{
+	mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+			 secs * HZ);
+}
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+	phy_queue_state_machine(phydev, 0);
+}
+
 static int phy_config_aneg(struct phy_device *phydev)
 {
 	if (phydev->drv->config_aneg)
@@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev)
 }
 
 /**
+ * phy_check_link_status - check link status and set state accordingly
+ * @phydev: the phy_device struct
+ *
+ * Description: Check for link and whether autoneg was triggered / is running,
+ * and set the state accordingly.
+ */
+static int phy_check_link_status(struct phy_device *phydev)
+{
+	int err;
+
+	WARN_ON(!mutex_is_locked(&phydev->lock));
+
+	err = phy_read_status(phydev);
+	if (err)
+		return err;
+
+	if (phydev->link && phydev->state != PHY_RUNNING) {
+		phydev->state = PHY_RUNNING;
+		phy_link_up(phydev);
+	} else if (!phydev->link && phydev->state != PHY_NOLINK) {
+		phydev->state = PHY_NOLINK;
+		phy_link_down(phydev, true);
+	}
+
+	return 0;
+}
+
+/**
  * phy_start_aneg - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
  *
@@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev)
  */
 int phy_start_aneg(struct phy_device *phydev)
 {
-	bool trigger = 0;
 	int err;
 
 	if (!phydev->drv)
@@ -504,7 +547,7 @@ int phy_start_aneg(struct phy_device *phydev)
 		phy_sanitize_settings(phydev);
 
 	/* Invalidate LP advertising flags */
-	phydev->lp_advertising = 0;
+	linkmode_zero(phydev->lp_advertising);
 
 	err = phy_config_aneg(phydev);
 	if (err < 0)
@@ -512,32 +555,16 @@ int phy_start_aneg(struct phy_device *phydev)
 
 	if (phydev->state != PHY_HALTED) {
 		if (AUTONEG_ENABLE == phydev->autoneg) {
-			phydev->state = PHY_AN;
-			phydev->link_timeout = PHY_AN_TIMEOUT;
+			err = phy_check_link_status(phydev);
 		} else {
 			phydev->state = PHY_FORCING;
 			phydev->link_timeout = PHY_FORCE_TIMEOUT;
 		}
 	}
 
-	/* Re-schedule a PHY state machine to check PHY status because
-	 * negotiation may already be done and aneg interrupt may not be
-	 * generated.
-	 */
-	if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
-		err = phy_aneg_done(phydev);
-		if (err > 0) {
-			trigger = true;
-			err = 0;
-		}
-	}
-
 out_unlock:
 	mutex_unlock(&phydev->lock);
 
-	if (trigger)
-		phy_trigger_machine(phydev);
-
 	return err;
 }
 EXPORT_SYMBOL(phy_start_aneg);
@@ -573,20 +600,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
  */
 int phy_speed_down(struct phy_device *phydev, bool sync)
 {
-	u32 adv = phydev->lp_advertising & phydev->supported;
-	u32 adv_old = phydev->advertising;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
 	int ret;
 
 	if (phydev->autoneg != AUTONEG_ENABLE)
 		return 0;
 
-	if (adv & PHY_10BT_FEATURES)
-		phydev->advertising &= ~(PHY_100BT_FEATURES |
-					 PHY_1000BT_FEATURES);
-	else if (adv & PHY_100BT_FEATURES)
-		phydev->advertising &= ~PHY_1000BT_FEATURES;
+	linkmode_copy(adv_old, phydev->advertising);
+	linkmode_copy(adv, phydev->lp_advertising);
+	linkmode_and(adv, adv, phydev->supported);
 
-	if (phydev->advertising == adv_old)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				   phydev->advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				   phydev->advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				   phydev->advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				   phydev->advertising);
+	} else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				     adv) ||
+		   linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				     adv)) {
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				   phydev->advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				   phydev->advertising);
+	}
+
+	if (linkmode_equal(phydev->advertising, adv_old))
 		return 0;
 
 	ret = phy_config_aneg(phydev);
@@ -605,28 +650,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
  */
 int phy_speed_up(struct phy_device *phydev)
 {
-	u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
-	u32 adv_old = phydev->advertising;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
+
+	linkmode_copy(adv_old, phydev->advertising);
 
 	if (phydev->autoneg != AUTONEG_ENABLE)
 		return 0;
 
-	phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
 
-	if (phydev->advertising == adv_old)
+	linkmode_andnot(not_speeds, adv_old, all_speeds);
+	linkmode_copy(supported, phydev->supported);
+	linkmode_and(speeds, supported, all_speeds);
+	linkmode_or(phydev->advertising, not_speeds, speeds);
+
+	if (linkmode_equal(phydev->advertising, adv_old))
 		return 0;
 
 	return phy_config_aneg(phydev);
 }
 EXPORT_SYMBOL_GPL(phy_speed_up);
 
-static void phy_queue_state_machine(struct phy_device *phydev,
-				    unsigned int secs)
-{
-	mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
-			 secs * HZ);
-}
-
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
@@ -644,20 +697,6 @@ void phy_start_machine(struct phy_device *phydev)
 EXPORT_SYMBOL_GPL(phy_start_machine);
 
 /**
- * phy_trigger_machine - trigger the state machine to run
- *
- * @phydev: the phy_device struct
- *
- * Description: There has been a change in state which requires that the
- *   state machine runs.
- */
-
-void phy_trigger_machine(struct phy_device *phydev)
-{
-	phy_queue_state_machine(phydev, 0);
-}
-
-/**
  * phy_stop_machine - stop the PHY state machine tracking
  * @phydev: target phy_device struct
  *
@@ -711,57 +750,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
 }
 
 /**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
- */
-static irqreturn_t phy_change(struct phy_device *phydev)
-{
-	if (phy_interrupt_is_valid(phydev)) {
-		if (phydev->drv->did_interrupt &&
-		    !phydev->drv->did_interrupt(phydev))
-			return IRQ_NONE;
-
-		if (phydev->state == PHY_HALTED)
-			if (phy_disable_interrupts(phydev))
-				goto phy_err;
-	}
-
-	mutex_lock(&phydev->lock);
-	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
-		phydev->state = PHY_CHANGELINK;
-	mutex_unlock(&phydev->lock);
-
-	/* reschedule state queue work to run as soon as possible */
-	phy_trigger_machine(phydev);
-
-	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
-		goto phy_err;
-	return IRQ_HANDLED;
-
-phy_err:
-	phy_error(phydev);
-	return IRQ_NONE;
-}
-
-/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
-	struct phy_device *phydev =
-		container_of(work, struct phy_device, phy_queue);
-
-	phy_change(phydev);
-}
-
-/**
  * phy_interrupt - PHY interrupt handler
  * @irq: interrupt line
  * @phy_dat: phy_device pointer
  *
- * Description: When a PHY interrupt occurs, the handler disables
- * interrupts, and uses phy_change to handle the interrupt.
+ * Description: Handle PHY interrupt
  */
 static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 {
@@ -770,7 +763,19 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	if (PHY_HALTED == phydev->state)
 		return IRQ_NONE;		/* It can't be ours.  */
 
-	return phy_change(phydev);
+	if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+		return IRQ_NONE;
+
+	/* reschedule state queue work to run as soon as possible */
+	phy_trigger_machine(phydev);
+
+	if (phy_clear_interrupt(phydev))
+		goto phy_err;
+	return IRQ_HANDLED;
+
+phy_err:
+	phy_error(phydev);
+	return IRQ_NONE;
 }
 
 /**
@@ -851,7 +856,7 @@ void phy_stop(struct phy_device *phydev)
 	phy_state_machine(&phydev->state_queue.work);
 
 	/* Cannot call flush_scheduled_work() here as desired because
-	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+	 * of rtnl_lock(), but PHY_HALTED shall guarantee the IRQ handler
 	 * will not reenable interrupts.
 	 */
 }
@@ -874,9 +879,6 @@ void phy_start(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 
 	switch (phydev->state) {
-	case PHY_STARTING:
-		phydev->state = PHY_PENDING;
-		break;
 	case PHY_READY:
 		phydev->state = PHY_UP;
 		break;
@@ -902,18 +904,6 @@ void phy_start(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_start);
 
-static void phy_link_up(struct phy_device *phydev)
-{
-	phydev->phy_link_change(phydev, true, true);
-	phy_led_trigger_change_speed(phydev);
-}
-
-static void phy_link_down(struct phy_device *phydev, bool do_carrier)
-{
-	phydev->phy_link_change(phydev, false, do_carrier);
-	phy_led_trigger_change_speed(phydev);
-}
-
 /**
  * phy_state_machine - Handle the state machine
  * @work: work_struct that describes the work to be done
@@ -936,63 +926,17 @@ void phy_state_machine(struct work_struct *work)
 
 	switch (phydev->state) {
 	case PHY_DOWN:
-	case PHY_STARTING:
 	case PHY_READY:
-	case PHY_PENDING:
 		break;
 	case PHY_UP:
 		needs_aneg = true;
 
-		phydev->link_timeout = PHY_AN_TIMEOUT;
-
-		break;
-	case PHY_AN:
-		err = phy_read_status(phydev);
-		if (err < 0)
-			break;
-
-		/* If the link is down, give up on negotiation for now */
-		if (!phydev->link) {
-			phydev->state = PHY_NOLINK;
-			phy_link_down(phydev, true);
-			break;
-		}
-
-		/* Check if negotiation is done.  Break if there's an error */
-		err = phy_aneg_done(phydev);
-		if (err < 0)
-			break;
-
-		/* If AN is done, we're running */
-		if (err > 0) {
-			phydev->state = PHY_RUNNING;
-			phy_link_up(phydev);
-		} else if (0 == phydev->link_timeout--)
-			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
-		if (!phy_polling_mode(phydev))
-			break;
-
-		err = phy_read_status(phydev);
-		if (err)
-			break;
-
-		if (phydev->link) {
-			if (AUTONEG_ENABLE == phydev->autoneg) {
-				err = phy_aneg_done(phydev);
-				if (err < 0)
-					break;
-
-				if (!err) {
-					phydev->state = PHY_AN;
-					phydev->link_timeout = PHY_AN_TIMEOUT;
-					break;
-				}
-			}
-			phydev->state = PHY_RUNNING;
-			phy_link_up(phydev);
-		}
+	case PHY_RUNNING:
+	case PHY_CHANGELINK:
+	case PHY_RESUMING:
+		err = phy_check_link_status(phydev);
 		break;
 	case PHY_FORCING:
 		err = genphy_update_link(phydev);
@@ -1008,32 +952,6 @@ void phy_state_machine(struct work_struct *work)
 			phy_link_down(phydev, false);
 		}
 		break;
-	case PHY_RUNNING:
-		if (!phy_polling_mode(phydev))
-			break;
-
-		err = phy_read_status(phydev);
-		if (err)
-			break;
-
-		if (!phydev->link) {
-			phydev->state = PHY_NOLINK;
-			phy_link_down(phydev, true);
-		}
-		break;
-	case PHY_CHANGELINK:
-		err = phy_read_status(phydev);
-		if (err)
-			break;
-
-		if (phydev->link) {
-			phydev->state = PHY_RUNNING;
-			phy_link_up(phydev);
-		} else {
-			phydev->state = PHY_NOLINK;
-			phy_link_down(phydev, true);
-		}
-		break;
 	case PHY_HALTED:
 		if (phydev->link) {
 			phydev->link = 0;
@@ -1041,30 +959,6 @@ void phy_state_machine(struct work_struct *work)
 			do_suspend = true;
 		}
 		break;
-	case PHY_RESUMING:
-		if (AUTONEG_ENABLE == phydev->autoneg) {
-			err = phy_aneg_done(phydev);
-			if (err < 0) {
-				break;
-			} else if (!err) {
-				phydev->state = PHY_AN;
-				phydev->link_timeout = PHY_AN_TIMEOUT;
-				break;
-			}
-		}
-
-		err = phy_read_status(phydev);
-		if (err)
-			break;
-
-		if (phydev->link) {
-			phydev->state = PHY_RUNNING;
-			phy_link_up(phydev);
-		} else	{
-			phydev->state = PHY_NOLINK;
-			phy_link_down(phydev, false);
-		}
-		break;
 	}
 
 	mutex_unlock(&phydev->lock);
@@ -1104,10 +998,34 @@ void phy_state_machine(struct work_struct *work)
 void phy_mac_interrupt(struct phy_device *phydev)
 {
 	/* Trigger a state machine change */
-	queue_work(system_power_efficient_wq, &phydev->phy_queue);
+	phy_trigger_machine(phydev);
 }
 EXPORT_SYMBOL(phy_mac_interrupt);
 
+static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
+{
+	linkmode_zero(advertising);
+
+	if (eee_adv & MDIO_EEE_100TX)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 advertising);
+	if (eee_adv & MDIO_EEE_1000T)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 advertising);
+	if (eee_adv & MDIO_EEE_10GT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+				 advertising);
+	if (eee_adv & MDIO_EEE_1000KX)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+				 advertising);
+	if (eee_adv & MDIO_EEE_10GKX4)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+				 advertising);
+	if (eee_adv & MDIO_EEE_10GKR)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+				 advertising);
+}
+
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1126,9 +1044,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 	/* According to 802.3az, EEE is supported only in full-duplex mode.
 	 */
 	if (phydev->duplex == DUPLEX_FULL) {
+		__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+		__ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
+		__ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
 		int eee_lp, eee_cap, eee_adv;
-		u32 lp, cap, adv;
 		int status;
+		u32 cap;
 
 		/* Read phy status to properly get the right settings */
 		status = phy_read_status(phydev);
@@ -1155,9 +1076,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 		if (eee_adv <= 0)
 			goto eee_exit_err;
 
-		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
-		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+		mmd_eee_adv_to_linkmode(adv, eee_adv);
+		mmd_eee_adv_to_linkmode(lp, eee_lp);
+		linkmode_and(common, adv, lp);
+
+		if (!phy_check_valid(phydev->speed, phydev->duplex, common))
 			goto eee_exit_err;
 
 		if (clk_stop_enable) {
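
The new mmd_eee_adv_to_linkmode() helper above decodes MDIO EEE advertisement register bits into link-mode bits so that the local and partner EEE advertisements can be intersected like any other linkmode mask. A compact standalone sketch of that register-to-capability decoding, using made-up FAKE_EEE_* bit values rather than the real MDIO_EEE_* constants:

#include <stdio.h>

/* Illustrative register bit layout; the real MDIO_EEE_* values differ. */
#define FAKE_EEE_100TX  (1u << 1)
#define FAKE_EEE_1000T  (1u << 2)
#define FAKE_EEE_10GT   (1u << 3)

/* Same pattern as the kernel helper: one capability per register bit. */
static void decode_eee(unsigned short reg)
{
	if (reg & FAKE_EEE_100TX)
		printf("  100baseT/Full EEE\n");
	if (reg & FAKE_EEE_1000T)
		printf("  1000baseT/Full EEE\n");
	if (reg & FAKE_EEE_10GT)
		printf("  10000baseT/Full EEE\n");
}

int main(void)
{
	unsigned short local   = FAKE_EEE_100TX | FAKE_EEE_1000T;
	unsigned short partner = FAKE_EEE_1000T | FAKE_EEE_10GT;

	/* EEE can only be used for modes both ends advertise. */
	printf("common EEE modes:\n");
	decode_eee(local & partner);
	return 0;
}
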
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ab33d17..e06613f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = {
 	ETHTOOL_LINK_MODE_TP_BIT,
 	ETHTOOL_LINK_MODE_MII_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_basic_ports_array);
 
 static const int phy_fibre_port_array[] = {
 	ETHTOOL_LINK_MODE_FIBRE_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_fibre_port_array);
 
 static const int phy_all_ports_features_array[] = {
 	ETHTOOL_LINK_MODE_Autoneg_BIT,
@@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = {
 	ETHTOOL_LINK_MODE_BNC_BIT,
 	ETHTOOL_LINK_MODE_Backplane_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
 
-static const int phy_10_100_features_array[] = {
+const int phy_10_100_features_array[4] = {
 	ETHTOOL_LINK_MODE_10baseT_Half_BIT,
 	ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 	ETHTOOL_LINK_MODE_100baseT_Half_BIT,
 	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_10_100_features_array);
 
-static const int phy_basic_t1_features_array[] = {
+const int phy_basic_t1_features_array[2] = {
 	ETHTOOL_LINK_MODE_TP_BIT,
 	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
 
-static const int phy_gbit_features_array[] = {
+const int phy_gbit_features_array[2] = {
 	ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
 	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_gbit_features_array);
 
-static const int phy_10gbit_features_array[] = {
+const int phy_10gbit_features_array[1] = {
 	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
 
 __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
 EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -587,7 +594,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 
 	mutex_init(&dev->lock);
 	INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
-	INIT_WORK(&dev->phy_queue, phy_change_work);
 
 	/* Request the appropriate module unconditionally; don't
 	 * bother trying to do so only if it isn't already loaded,
@@ -1442,8 +1448,13 @@ static int genphy_config_advert(struct phy_device *phydev)
 	int err, changed = 0;
 
 	/* Only allow advertising what this PHY supports */
-	phydev->advertising &= phydev->supported;
-	advertise = phydev->advertising;
+	linkmode_and(phydev->advertising, phydev->advertising,
+		     phydev->supported);
+	if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
+						     phydev->advertising))
+		phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
+			    __ETHTOOL_LINK_MODE_MASK_NBITS,
+			    phydev->advertising);
 
 	/* Setup standard advertisement */
 	adv = phy_read(phydev, MII_ADVERTISE);
@@ -1482,10 +1493,11 @@ static int genphy_config_advert(struct phy_device *phydev)
 	oldadv = adv;
 	adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 
-	if (phydev->supported & (SUPPORTED_1000baseT_Half |
-				 SUPPORTED_1000baseT_Full)) {
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			      phydev->supported) ||
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			      phydev->supported))
 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-	}
 
 	if (adv != oldadv)
 		changed = 1;
@@ -1690,11 +1702,13 @@ int genphy_read_status(struct phy_device *phydev)
 	if (err)
 		return err;
 
-	phydev->lp_advertising = 0;
+	linkmode_zero(phydev->lp_advertising);
 
 	if (AUTONEG_ENABLE == phydev->autoneg) {
-		if (phydev->supported & (SUPPORTED_1000baseT_Half
-					| SUPPORTED_1000baseT_Full)) {
+		if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				      phydev->supported) ||
+		    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				      phydev->supported)) {
 			lpagb = phy_read(phydev, MII_STAT1000);
 			if (lpagb < 0)
 				return lpagb;
@@ -1711,8 +1725,8 @@ int genphy_read_status(struct phy_device *phydev)
 				return -ENOLINK;
 			}
 
-			phydev->lp_advertising =
-				mii_stat1000_to_ethtool_lpa_t(lpagb);
+			mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising,
+						       lpagb);
 			common_adv_gb = lpagb & adv << 2;
 		}
 
@@ -1720,7 +1734,7 @@ int genphy_read_status(struct phy_device *phydev)
 		if (lpa < 0)
 			return lpa;
 
-		phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+		mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
 
 		adv = phy_read(phydev, MII_ADVERTISE);
 		if (adv < 0)
@@ -1801,11 +1815,13 @@ EXPORT_SYMBOL(genphy_soft_reset);
 int genphy_config_init(struct phy_device *phydev)
 {
 	int val;
-	u32 features;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
 
-	features = (SUPPORTED_TP | SUPPORTED_MII
-			| SUPPORTED_AUI | SUPPORTED_FIBRE |
-			SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+	linkmode_set_bit_array(phy_basic_ports_array,
+			       ARRAY_SIZE(phy_basic_ports_array),
+			       features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
 
 	/* Do we support autonegotiation? */
 	val = phy_read(phydev, MII_BMSR);
@@ -1813,16 +1829,16 @@ int genphy_config_init(struct phy_device *phydev)
 		return val;
 
 	if (val & BMSR_ANEGCAPABLE)
-		features |= SUPPORTED_Autoneg;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
 
 	if (val & BMSR_100FULL)
-		features |= SUPPORTED_100baseT_Full;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
 	if (val & BMSR_100HALF)
-		features |= SUPPORTED_100baseT_Half;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
 	if (val & BMSR_10FULL)
-		features |= SUPPORTED_10baseT_Full;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
 	if (val & BMSR_10HALF)
-		features |= SUPPORTED_10baseT_Half;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
 
 	if (val & BMSR_ESTATEN) {
 		val = phy_read(phydev, MII_ESTATUS);
@@ -1830,13 +1846,15 @@ int genphy_config_init(struct phy_device *phydev)
 			return val;
 
 		if (val & ESTATUS_1000_TFULL)
-			features |= SUPPORTED_1000baseT_Full;
+			linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+					 features);
 		if (val & ESTATUS_1000_THALF)
-			features |= SUPPORTED_1000baseT_Half;
+			linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+					 features);
 	}
 
-	phydev->supported &= features;
-	phydev->advertising &= features;
+	linkmode_and(phydev->supported, phydev->supported, features);
+	linkmode_and(phydev->advertising, phydev->advertising, features);
 
 	return 0;
 }
@@ -1880,20 +1898,37 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-			       PHY_10BT_FEATURES);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(speeds) = { 0, };
+
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       speeds);
+	linkmode_set_bit_array(phy_gbit_features_array,
+			       ARRAY_SIZE(phy_gbit_features_array),
+			       speeds);
+
+	linkmode_andnot(phydev->supported, phydev->supported, speeds);
 
 	switch (max_speed) {
 	default:
 		return -ENOTSUPP;
 	case SPEED_1000:
-		phydev->supported |= PHY_1000BT_FEATURES;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				 phydev->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 phydev->supported);
 		/* fall through */
 	case SPEED_100:
-		phydev->supported |= PHY_100BT_FEATURES;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				 phydev->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 phydev->supported);
 		/* fall through */
 	case SPEED_10:
-		phydev->supported |= PHY_10BT_FEATURES;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+				 phydev->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+				 phydev->supported);
 	}
 
 	return 0;
@@ -1907,7 +1942,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
 	if (err)
 		return err;
 
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	return 0;
 }
@@ -1924,10 +1959,8 @@ EXPORT_SYMBOL(phy_set_max_speed);
  */
 void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
 {
-	WARN_ON(link_mode > 31);
-
-	phydev->supported &= ~BIT(link_mode);
-	phydev->advertising = phydev->supported;
+	linkmode_clear_bit(link_mode, phydev->supported);
+	linkmode_copy(phydev->advertising, phydev->supported);
 }
 EXPORT_SYMBOL(phy_remove_link_mode);
 
@@ -1940,9 +1973,9 @@ EXPORT_SYMBOL(phy_remove_link_mode);
  */
 void phy_support_sym_pause(struct phy_device *phydev)
 {
-	phydev->supported &= ~SUPPORTED_Asym_Pause;
-	phydev->supported |= SUPPORTED_Pause;
-	phydev->advertising = phydev->supported;
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+	linkmode_copy(phydev->advertising, phydev->supported);
 }
 EXPORT_SYMBOL(phy_support_sym_pause);
 
@@ -1954,8 +1987,9 @@ EXPORT_SYMBOL(phy_support_sym_pause);
  */
 void phy_support_asym_pause(struct phy_device *phydev)
 {
-	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-	phydev->advertising = phydev->supported;
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+	linkmode_copy(phydev->advertising, phydev->supported);
 }
 EXPORT_SYMBOL(phy_support_asym_pause);
 
@@ -1973,12 +2007,13 @@ EXPORT_SYMBOL(phy_support_asym_pause);
 void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
 		       bool autoneg)
 {
-	phydev->supported &= ~SUPPORTED_Pause;
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
 
 	if (rx && tx && autoneg)
-		phydev->supported |= SUPPORTED_Pause;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				 phydev->supported);
 
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->advertising, phydev->supported);
 }
 EXPORT_SYMBOL(phy_set_sym_pause);
 
@@ -1995,20 +2030,29 @@ EXPORT_SYMBOL(phy_set_sym_pause);
  */
 void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
 {
-	u16 oldadv = phydev->advertising;
-	u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
 
-	if (rx)
-		newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-	if (tx)
-		newadv ^= SUPPORTED_Asym_Pause;
+	linkmode_copy(oldadv, phydev->advertising);
 
-	if (oldadv != newadv) {
-		phydev->advertising = newadv;
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+			   phydev->advertising);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			   phydev->advertising);
 
-		if (phydev->autoneg)
-			phy_start_aneg(phydev);
+	if (rx) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				 phydev->advertising);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				 phydev->advertising);
 	}
+
+	if (tx)
+		linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				    phydev->advertising);
+
+	if (!linkmode_equal(oldadv, phydev->advertising) &&
+	    phydev->autoneg)
+		phy_start_aneg(phydev);
 }
 EXPORT_SYMBOL(phy_set_asym_pause);
 
@@ -2024,8 +2068,10 @@ EXPORT_SYMBOL(phy_set_asym_pause);
 bool phy_validate_pause(struct phy_device *phydev,
 			struct ethtool_pauseparam *pp)
 {
-	if (!(phydev->supported & SUPPORTED_Pause) ||
-	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+			       phydev->supported) ||
+	    (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				phydev->supported) &&
 	     pp->rx_pause != pp->tx_pause))
 		return false;
 	return true;
@@ -2074,6 +2120,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
 	phydev->eee_broken_modes = broken;
 }
 
+static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+{
+	return phydrv->config_intr && phydrv->ack_interrupt;
+}
+
 /**
  * phy_probe - probe and init a PHY device
  * @dev: device to probe and init
@@ -2095,8 +2146,7 @@ static int phy_probe(struct device *dev)
 	/* Disable the interrupt if the PHY doesn't support it
 	 * but the interrupt is still a valid one
 	 */
-	if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
-	    phy_interrupt_is_valid(phydev))
+	if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
 		phydev->irq = PHY_POLL;
 
 	if (phydrv->flags & PHY_IS_INTERNAL)
@@ -2109,9 +2159,9 @@ static int phy_probe(struct device *dev)
 	 * or both of these values
 	 */
 	ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
-	phydev->supported = features;
+	linkmode_copy(phydev->supported, phydrv->features);
 	of_set_phy_supported(phydev);
-	phydev->advertising = phydev->supported;
+	linkmode_copy(phydev->advertising, phydev->supported);
 
 	/* Get the EEE modes we want to prohibit. We will ask
 	 * the PHY stop advertising these mode later on
@@ -2131,14 +2181,22 @@ static int phy_probe(struct device *dev)
 	 */
 	if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
 	    test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
-		phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				   phydev->supported);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				   phydev->supported);
 		if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
-			phydev->supported |= SUPPORTED_Pause;
+			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+					 phydev->supported);
 		if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
 			     phydrv->features))
-			phydev->supported |= SUPPORTED_Asym_Pause;
+			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+					 phydev->supported);
 	} else {
-		phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				 phydev->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				 phydev->supported);
 	}
 
 	/* Set the state to READY by default */
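
The hunks above convert phylib's pause and link-mode helpers from the legacy u32
SUPPORTED_*/ADVERTISED_* masks to linkmode bitmaps. As a rough sketch only (the
function and driver name are made up, not part of this patch), a caller of the
new API restricting a PHY might look like:

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Limit a PHY to autoneg'd 100BASE-T full duplex plus symmetric pause. */
static void foo_limit_modes(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);
}
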
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1..263385b 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
 EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
 
 static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
-					size_t size, char *suffix)
+					size_t size, const char *suffix)
 {
 	snprintf(buf, size, PHY_ID_FMT ":%s",
 		 phy->mdio.bus->id, phy->mdio.addr, suffix);
@@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy,
 				    struct phy_led_trigger *plt,
 				    unsigned int speed)
 {
-	char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE];
-
 	plt->speed = speed;
-
-	if (speed < SPEED_1000)
-		snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed);
-	else if (speed == SPEED_2500)
-		snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps");
-	else
-		snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
-			 DIV_ROUND_CLOSEST(speed, 1000));
-
 	phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
-				    name_suffix);
+				    phy_speed_to_str(speed));
 	plt->trigger.name = plt->name;
 
 	return led_trigger_register(&plt->trigger);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9b8dd0d..e7becc7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
 	phylink_validate(pl, pl->supported, &pl->link_config);
 
 	s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
-			       pl->supported,
-			       __ETHTOOL_LINK_MODE_MASK_NBITS, true);
+			       pl->supported, true);
 	linkmode_zero(pl->supported);
 	phylink_set(pl->supported, MII);
 	if (s) {
@@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
 {
 	struct phylink_link_state config;
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
-	u32 advertising;
 	int ret;
 
 	memset(&config, 0, sizeof(config));
-	ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
-	ethtool_convert_legacy_u32_to_link_mode(config.advertising,
-						phy->advertising);
+	linkmode_copy(supported, phy->supported);
+	linkmode_copy(config.advertising, phy->advertising);
 	config.interface = pl->link_config.interface;
 
 	/*
@@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
 	linkmode_copy(pl->link_config.advertising, config.advertising);
 
 	/* Restrict the phy advertisement according to the MAC support. */
-	ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising);
-	phy->advertising = advertising;
+	linkmode_copy(phy->advertising, config.advertising);
 	mutex_unlock(&pl->state_mutex);
 	mutex_unlock(&phy->lock);
 
 	netdev_dbg(pl->netdev,
-		   "phy: setting supported %*pb advertising 0x%08x\n",
+		   "phy: setting supported %*pb advertising %*pb\n",
 		   __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
-		   phy->advertising);
+		   __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
 
 	phy_start_machine(phy);
 	if (phy->irq > 0)
@@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 		 * duplex.
 		 */
 		s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
-				       pl->supported,
-				       __ETHTOOL_LINK_MODE_MASK_NBITS, false);
+				       pl->supported, false);
 		if (!s)
 			return -EINVAL;
 
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 889a4dc..cfe2313 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { {
 	.name		= "QS6612",
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= qs6612_config_init,
 	.ack_interrupt	= qs6612_ack_interrupt,
 	.config_intr	= qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 271e8ad..c6010fb 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
 
 static struct phy_driver realtek_drvs[] = {
 	{
-		.phy_id         = 0x00008201,
+		PHY_ID_MATCH_EXACT(0x00008201),
 		.name           = "RTL8201CP Ethernet",
-		.phy_id_mask    = 0x0000ffff,
 		.features       = PHY_BASIC_FEATURES,
-		.flags          = PHY_HAS_INTERRUPT,
 	}, {
-		.phy_id		= 0x001cc816,
+		PHY_ID_MATCH_EXACT(0x001cc816),
 		.name		= "RTL8201F Fast Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.ack_interrupt	= &rtl8201_ack_interrupt,
 		.config_intr	= &rtl8201_config_intr,
 		.suspend	= genphy_suspend,
@@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = {
 		.read_page	= rtl821x_read_page,
 		.write_page	= rtl821x_write_page,
 	}, {
-		.phy_id		= 0x001cc910,
+		PHY_ID_MATCH_EXACT(0x001cc910),
 		.name		= "RTL8211 Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.config_aneg	= rtl8211_config_aneg,
 		.read_mmd	= &genphy_read_mmd_unsupported,
 		.write_mmd	= &genphy_write_mmd_unsupported,
 	}, {
-		.phy_id		= 0x001cc912,
+		PHY_ID_MATCH_EXACT(0x001cc912),
 		.name		= "RTL8211B Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211b_config_intr,
 		.read_mmd	= &genphy_read_mmd_unsupported,
@@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = {
 		.suspend	= rtl8211b_suspend,
 		.resume		= rtl8211b_resume,
 	}, {
-		.phy_id		= 0x001cc913,
+		PHY_ID_MATCH_EXACT(0x001cc913),
 		.name		= "RTL8211C Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.config_init	= rtl8211c_config_init,
 		.read_mmd	= &genphy_read_mmd_unsupported,
 		.write_mmd	= &genphy_write_mmd_unsupported,
 	}, {
-		.phy_id		= 0x001cc914,
+		PHY_ID_MATCH_EXACT(0x001cc914),
 		.name		= "RTL8211DN Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.ack_interrupt	= rtl821x_ack_interrupt,
 		.config_intr	= rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	}, {
-		.phy_id		= 0x001cc915,
+		PHY_ID_MATCH_EXACT(0x001cc915),
 		.name		= "RTL8211E Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	}, {
-		.phy_id		= 0x001cc916,
+		PHY_ID_MATCH_EXACT(0x001cc916),
 		.name		= "RTL8211F Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= &rtl8211f_config_init,
 		.ack_interrupt	= &rtl8211f_ack_interrupt,
 		.config_intr	= &rtl8211f_config_intr,
@@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = {
 		.read_page	= rtl821x_read_page,
 		.write_page	= rtl821x_write_page,
 	}, {
-		.phy_id		= 0x001cc961,
+		PHY_ID_MATCH_EXACT(0x001cc961),
 		.name		= "RTL8366RB Gigabit Ethernet",
-		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
-		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= &rtl8366rb_config_init,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
@@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = {
 
 module_phy_driver(realtek_drvs);
 
-static struct mdio_device_id __maybe_unused realtek_tbl[] = {
-	{ 0x001cc816, 0x001fffff },
-	{ 0x001cc910, 0x001fffff },
-	{ 0x001cc912, 0x001fffff },
-	{ 0x001cc913, 0x001fffff },
-	{ 0x001cc914, 0x001fffff },
-	{ 0x001cc915, 0x001fffff },
-	{ 0x001cc916, 0x001fffff },
-	{ 0x001cc961, 0x001fffff },
+static const struct mdio_device_id __maybe_unused realtek_tbl[] = {
+	{ PHY_ID_MATCH_VENDOR(0x001cc800) },
 	{ }
 };
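
For reference, the PHY_ID_MATCH_EXACT()/PHY_ID_MATCH_VENDOR() helpers used above
fill in both .phy_id and .phy_id_mask, so the explicit mask fields become
redundant. A minimal hypothetical entry (names are illustrative only):

static struct phy_driver foo_drvs[] = { {
	PHY_ID_MATCH_EXACT(0x001cc912),	/* match the full 32-bit ID */
	.name		= "Example Fast Ethernet",
	.features	= PHY_BASIC_FEATURES,
} };

static const struct mdio_device_id __maybe_unused foo_tbl[] = {
	{ PHY_ID_MATCH_VENDOR(0x001cc800) },	/* match on the vendor part only */
	{ }
};
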
 
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c3282083..f9477ff 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN83C185",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= smsc_phy_probe,
 
@@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8187",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= smsc_phy_probe,
 
@@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8700",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= smsc_phy_probe,
 
@@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN911x Internal PHY",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= smsc_phy_probe,
 
@@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8710/LAN8720",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
+	.flags		= PHY_RST_AFTER_CLK_EN,
 
 	.probe		= smsc_phy_probe,
 
@@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8740",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 
 	.probe		= smsc_phy_probe,
 
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 2fe9a87..33d7336 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.phy_id_mask = 0xfffffff0,
 	.name = "STe101p",
 	.features = PHY_BASIC_FEATURES,
-	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
@@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.phy_id_mask = 0xffffffff,
 	.name = "STe100p",
 	.features = PHY_BASIC_FEATURES,
-	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 55f48ee..1e4fc42 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev)
 		return phy_state;
 
 	phydev->link = 0;
-	phydev->lp_advertising = 0;
+	linkmode_zero(phydev->lp_advertising);
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
 
@@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev)
 			if (phy_state < 0)
 				return phy_state;
 
-			phydev->lp_advertising
-				= mii_lpa_to_ethtool_lpa_t(phy_state);
+			mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
+						  phy_state);
 
 			if (phydev->duplex == DUPLEX_FULL) {
 				if (phy_state & LPA_PAUSE_CAP)
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index fbf9ad4..0646af4 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -70,7 +70,6 @@
 #define PHY_ID_VSC8244			0x000fc6c0
 #define PHY_ID_VSC8514			0x00070670
 #define PHY_ID_VSC8572			0x000704d0
-#define PHY_ID_VSC8574			0x000704a0
 #define PHY_ID_VSC8601			0x00070420
 #define PHY_ID_VSC7385			0x00070450
 #define PHY_ID_VSC7388			0x00070480
@@ -303,7 +302,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
 			 phydev->drv->phy_id == PHY_ID_VSC8244 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8514 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8572 ||
-			 phydev->drv->phy_id == PHY_ID_VSC8574 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8601) ?
 				MII_VSC8244_IMASK_MASK :
 				MII_VSC8221_IMASK_MASK);
@@ -399,7 +397,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name           = "Vitesse VSC8234",
 	.phy_id_mask    = 0x000ffff0,
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
@@ -409,7 +406,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name		= "Vitesse VSC8244",
 	.phy_id_mask	= 0x000fffc0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
@@ -419,7 +415,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name		= "Vitesse VSC8514",
 	.phy_id_mask	= 0x000ffff0,
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
@@ -429,17 +424,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name           = "Vitesse VSC8572",
 	.phy_id_mask    = 0x000ffff0,
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
-	.config_init    = &vsc824x_config_init,
-	.config_aneg    = &vsc82x4_config_aneg,
-	.ack_interrupt  = &vsc824x_ack_interrupt,
-	.config_intr    = &vsc82xx_config_intr,
-}, {
-	.phy_id         = PHY_ID_VSC8574,
-	.name           = "Vitesse VSC8574",
-	.phy_id_mask    = 0x000ffff0,
-	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
@@ -449,7 +433,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name           = "Vitesse VSC8601",
 	.phy_id_mask    = 0x000ffff0,
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc8601_config_init,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
@@ -494,7 +477,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.name           = "Vitesse VSC8662",
 	.phy_id_mask    = 0x000ffff0,
 	.features       = PHY_GBIT_FEATURES,
-	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
@@ -505,7 +487,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.phy_id_mask	= 0x000ffff0,
 	.name		= "Vitesse VSC8221",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
@@ -515,7 +496,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.phy_id_mask	= 0x000ffff0,
 	.name		= "Vitesse VSC8211",
 	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
@@ -528,7 +508,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
 	{ PHY_ID_VSC8244, 0x000fffc0 },
 	{ PHY_ID_VSC8514, 0x000ffff0 },
 	{ PHY_ID_VSC8572, 0x000ffff0 },
-	{ PHY_ID_VSC8574, 0x000ffff0 },
 	{ PHY_ID_VSC7385, 0x000ffff0 },
 	{ PHY_ID_VSC7388, 0x000ffff0 },
 	{ PHY_ID_VSC7395, 0x000ffff0 },
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e244f5d..56575f8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -188,6 +188,11 @@ struct tun_file {
 	struct xdp_rxq_info xdp_rxq;
 };
 
+struct tun_page {
+	struct page *page;
+	int count;
+};
+
 struct tun_flow_entry {
 	struct hlist_node hash_link;
 	struct rcu_head rcu;
@@ -1473,23 +1478,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
 	skb->truesize += skb->data_len;
 
 	for (i = 1; i < it->nr_segs; i++) {
-		struct page_frag *pfrag = &current->task_frag;
 		size_t fragsz = it->iov[i].iov_len;
+		struct page *page;
+		void *frag;
 
 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
 			err = -EINVAL;
 			goto free;
 		}
-
-		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+		frag = netdev_alloc_frag(fragsz);
+		if (!frag) {
 			err = -ENOMEM;
 			goto free;
 		}
-
-		skb_fill_page_desc(skb, i - 1, pfrag->page,
-				   pfrag->offset, fragsz);
-		page_ref_inc(pfrag->page);
-		pfrag->offset += fragsz;
+		page = virt_to_head_page(frag);
+		skb_fill_page_desc(skb, i - 1, page,
+				   frag - page_address(page), fragsz);
 	}
 
 	return skb;
@@ -2381,9 +2385,16 @@ static void tun_sock_write_space(struct sock *sk)
 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
 }
 
+static void tun_put_page(struct tun_page *tpage)
+{
+	if (tpage->page)
+		__page_frag_cache_drain(tpage->page, tpage->count);
+}
+
 static int tun_xdp_one(struct tun_struct *tun,
 		       struct tun_file *tfile,
-		       struct xdp_buff *xdp, int *flush)
+		       struct xdp_buff *xdp, int *flush,
+		       struct tun_page *tpage)
 {
 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
 	struct virtio_net_hdr *gso = &hdr->gso;
@@ -2394,6 +2405,7 @@ static int tun_xdp_one(struct tun_struct *tun,
 	int buflen = hdr->buflen;
 	int err = 0;
 	bool skb_xdp = false;
+	struct page *page;
 
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog) {
@@ -2420,7 +2432,14 @@ static int tun_xdp_one(struct tun_struct *tun,
 		case XDP_PASS:
 			break;
 		default:
-			put_page(virt_to_head_page(xdp->data));
+			page = virt_to_head_page(xdp->data);
+			if (tpage->page == page) {
+				++tpage->count;
+			} else {
+				tun_put_page(tpage);
+				tpage->page = page;
+				tpage->count = 1;
+			}
 			return 0;
 		}
 	}
@@ -2452,7 +2471,8 @@ static int tun_xdp_one(struct tun_struct *tun,
 			goto out;
 	}
 
-	if (!rcu_dereference(tun->steering_prog))
+	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+	    !tfile->detached)
 		rxhash = __skb_get_hash_symmetric(skb);
 
 	skb_record_rx_queue(skb, tfile->queue_index);
@@ -2484,15 +2504,18 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 		return -EBADFD;
 
 	if (ctl && (ctl->type == TUN_MSG_PTR)) {
+		struct tun_page tpage;
 		int n = ctl->num;
 		int flush = 0;
 
+		memset(&tpage, 0, sizeof(tpage));
+
 		local_bh_disable();
 		rcu_read_lock();
 
 		for (i = 0; i < n; i++) {
 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
-			tun_xdp_one(tun, tfile, xdp, &flush);
+			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
 		}
 
 		if (flush)
@@ -2501,6 +2524,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 		rcu_read_unlock();
 		local_bh_enable();
 
+		tun_put_page(&tpage);
+
 		ret = total_len;
 		goto out;
 	}
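
The tun_page changes above batch page reference drops: instead of one put_page()
per dropped XDP buffer, consecutive drops against the same head page are counted
and released with a single __page_frag_cache_drain() call. A standalone sketch of
that accumulation pattern (hypothetical helpers, not code from this patch):

struct frag_batch {
	struct page *page;
	int count;
};

/* Drop one reference on the head page backing @data, coalescing drops
 * that hit the same page.
 */
static void frag_batch_put(struct frag_batch *b, void *data)
{
	struct page *page = virt_to_head_page(data);

	if (b->page == page) {
		b->count++;
		return;
	}
	if (b->page)
		__page_frag_cache_drain(b->page, b->count);
	b->page = page;
	b->count = 1;
}

/* Release whatever is still batched (call once at the end). */
static void frag_batch_flush(struct frag_batch *b)
{
	if (b->page)
		__page_frag_cache_drain(b->page, b->count);
	b->page = NULL;
	b->count = 0;
}
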
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index be1917b..3c8bdac 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
 #include <linux/uaccess.h>
+#include <linux/linkmode.h>
 #include <linux/list.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net,
 		dev->fc_request_control |= FLOW_CTRL_TX;
 
 	if (ecmd.base.autoneg) {
+		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
 		u32 mii_adv;
-		u32 advertising;
 
-		ethtool_convert_link_mode_to_legacy_u32(
-			&advertising, ecmd.link_modes.advertising);
-
-		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				   ecmd.link_modes.advertising);
+		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				   ecmd.link_modes.advertising);
 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
-		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
-
-		ethtool_convert_legacy_u32_to_link_mode(
-			ecmd.link_modes.advertising, advertising);
+		mii_adv_to_linkmode_adv_t(fc, mii_adv);
+		linkmode_or(ecmd.link_modes.advertising, fc,
+			    ecmd.link_modes.advertising);
 
 		phy_ethtool_ksettings_set(phydev, &ecmd);
 	}
@@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
 	int ret;
 	u32 mii_adv;
 	struct phy_device *phydev;
@@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
 
 	/* support both flow controls */
 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
-	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+			   phydev->advertising);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			   phydev->advertising);
 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
-	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+	mii_adv_to_linkmode_adv_t(fc, mii_adv);
+	linkmode_or(phydev->advertising, fc, phydev->advertising);
 
 	if (phydev->mdio.dev.of_node) {
 		u32 reg;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb..e3d0862 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -618,9 +618,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
 		return;
 	}
 
-	memcpy(&intdata, urb->transfer_buffer, 4);
-	le32_to_cpus(&intdata);
-
+	intdata = get_unaligned_le32(urb->transfer_buffer);
 	netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
 
 	if (intdata & INT_ENP_PHY_INT_)
@@ -1295,6 +1293,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 		dev->net->features |= NETIF_F_RXCSUM;
 
 	dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+	set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 
 	smsc95xx_init_mac_address(dev);
 
@@ -1933,8 +1932,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		unsigned char *packet;
 		u16 size;
 
-		memcpy(&header, skb->data, sizeof(header));
-		le32_to_cpus(&header);
+		header = get_unaligned_le32(skb->data);
 		skb_pull(skb, 4 + NET_IP_ALIGN);
 		packet = skb->data;
 
@@ -2011,12 +2009,30 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
 	return (high_16 << 16) | low_16;
 }
 
+/* The TX CSUM won't work if the checksum lies in the last 4 bytes of the
+ * transmission. This is fairly unlikely; it only seems to trigger with
+ * some short TCP ACK packets being sent.
+ *
+ * Note, this calculation should probably check for the alignment of the
+ * data as well, but a straight check for csum being in the last four bytes
+ * of the packet should be ok for now.
+ */
+static bool smsc95xx_can_tx_checksum(struct sk_buff *skb)
+{
+	unsigned int len = skb->len - skb_checksum_start_offset(skb);
+
+	if (skb->len <= 45)
+		return false;
+	return skb->csum_offset < (len - (4 + 1));
+}
+
 static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 					 struct sk_buff *skb, gfp_t flags)
 {
 	bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
 	int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
 	u32 tx_cmd_a, tx_cmd_b;
+	void *ptr;
 
 	/* We do not advertise SG, so skbs should be already linearized */
 	BUG_ON(skb_shinfo(skb)->nr_frags);
@@ -2030,8 +2046,11 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 		return NULL;
 	}
 
+	tx_cmd_b = (u32)skb->len;
+	tx_cmd_a = tx_cmd_b | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+
 	if (csum) {
-		if (skb->len <= 45) {
+		if (!smsc95xx_can_tx_checksum(skb)) {
 			/* workaround - hardware tx checksum does not work
 			 * properly with extremely small packets */
 			long csstart = skb_checksum_start_offset(skb);
@@ -2043,24 +2062,18 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 			csum = false;
 		} else {
 			u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
-			skb_push(skb, 4);
-			cpu_to_le32s(&csum_preamble);
-			memcpy(skb->data, &csum_preamble, 4);
+			ptr = skb_push(skb, 4);
+			put_unaligned_le32(csum_preamble, ptr);
+
+			tx_cmd_a += 4;
+			tx_cmd_b += 4;
+			tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
 		}
 	}
 
-	skb_push(skb, 4);
-	tx_cmd_b = (u32)(skb->len - 4);
-	if (csum)
-		tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
-	cpu_to_le32s(&tx_cmd_b);
-	memcpy(skb->data, &tx_cmd_b, 4);
-
-	skb_push(skb, 4);
-	tx_cmd_a = (u32)(skb->len - 8) | TX_CMD_A_FIRST_SEG_ |
-		TX_CMD_A_LAST_SEG_;
-	cpu_to_le32s(&tx_cmd_a);
-	memcpy(skb->data, &tx_cmd_a, 4);
+	ptr = skb_push(skb, 8);
+	put_unaligned_le32(tx_cmd_a, ptr);
+	put_unaligned_le32(tx_cmd_b, ptr+4);
 
 	return skb;
 }
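
The smsc95xx conversion above replaces open-coded memcpy() plus
le32_to_cpus()/cpu_to_le32s() sequences with the unaligned accessors; the two
forms are equivalent. A minimal before/after sketch (buffer and value names are
arbitrary):

#include <asm/unaligned.h>

/* old read pattern */
memcpy(&val, buf, 4);
le32_to_cpus(&val);

/* new read pattern: one call, no alignment assumptions */
val = get_unaligned_le32(buf);

/* and the matching write */
put_unaligned_le32(val, buf);
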
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 890fa5b..f412ea1 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 		return PTR_ERR(net);
 
 	peer = rtnl_create_link(net, ifname, name_assign_type,
-				&veth_link_ops, tbp);
+				&veth_link_ops, tbp, extack);
 	if (IS_ERR(peer)) {
 		put_net(net);
 		return PTR_ERR(peer);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 69b7227..21ad4b1 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -981,24 +981,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 				   struct sk_buff *skb)
 {
 	int orig_iif = skb->skb_iif;
-	bool need_strict;
+	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+	bool is_ndisc = ipv6_ndisc_frame(skb);
 
-	/* loopback traffic; do not push through packet taps again.
-	 * Reset pkt_type for upper layers to process skb
+	/* loopback, multicast & non-ND link-local traffic; do not push through
+	 * packet taps again. Reset pkt_type for upper layers to process skb
 	 */
-	if (skb->pkt_type == PACKET_LOOPBACK) {
+	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
-		skb->pkt_type = PACKET_HOST;
+		if (skb->pkt_type == PACKET_LOOPBACK)
+			skb->pkt_type = PACKET_HOST;
 		goto out;
 	}
 
-	/* if packet is NDISC or addressed to multicast or link-local
-	 * then keep the ingress interface
-	 */
-	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
-	if (!ipv6_ndisc_frame(skb) && !need_strict) {
+	/* if packet is NDISC then keep the ingress interface */
+	if (!is_ndisc) {
 		vrf_rx_stats(vrf_dev, skb->len);
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 297cdea..9110662 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -79,9 +79,11 @@ struct vxlan_fdb {
 	u8		  eth_addr[ETH_ALEN];
 	u16		  state;	/* see ndm_state */
 	__be32		  vni;
-	u8		  flags;	/* see ndm_flags */
+	u16		  flags;	/* see ndm_flags and below */
 };
 
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 
@@ -376,6 +378,7 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
 		.remote_ifindex = rd->remote_ifindex,
 		.vni = fdb->vni,
 		.offloaded = rd->offloaded,
+		.added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER,
 	};
 	memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN);
 
@@ -384,15 +387,19 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
 }
 
 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
-			     struct vxlan_rdst *rd, int type)
+			     struct vxlan_rdst *rd, int type, bool swdev_notify)
 {
-	switch (type) {
-	case RTM_NEWNEIGH:
-		vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true);
-		break;
-	case RTM_DELNEIGH:
-		vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false);
-		break;
+	if (swdev_notify) {
+		switch (type) {
+		case RTM_NEWNEIGH:
+			vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+							   true);
+			break;
+		case RTM_DELNEIGH:
+			vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+							   false);
+			break;
+		}
 	}
 
 	__vxlan_fdb_notify(vxlan, fdb, rd, type);
@@ -409,7 +416,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
 		.remote_vni = cpu_to_be32(VXLAN_N_VID),
 	};
 
-	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -421,7 +428,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
 
 	memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
 }
 
 /* Hash Ethernet address */
@@ -540,6 +547,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
 	fdb_info->remote_ifindex = rdst->remote_ifindex;
 	fdb_info->vni = vni;
 	fdb_info->offloaded = rdst->offloaded;
+	fdb_info->added_by_user = f->flags & NTF_VXLAN_ADDED_BY_USER;
 	ether_addr_copy(fdb_info->eth_addr, mac);
 
 out:
@@ -700,7 +708,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 
 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
 					 const u8 *mac, __u16 state,
-					 __be32 src_vni, __u8 ndm_flags)
+					 __be32 src_vni, __u16 ndm_flags)
 {
 	struct vxlan_fdb *f;
 
@@ -720,7 +728,7 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 			    const u8 *mac, union vxlan_addr *ip,
 			    __u16 state, __be16 port, __be32 src_vni,
-			    __be32 vni, __u32 ifindex, __u8 ndm_flags,
+			    __be32 vni, __u32 ifindex, __u16 ndm_flags,
 			    struct vxlan_fdb **fdb)
 {
 	struct vxlan_rdst *rd = NULL;
@@ -756,9 +764,10 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
 			    const u8 *mac, union vxlan_addr *ip,
 			    __u16 state, __u16 flags,
 			    __be16 port, __be32 src_vni, __be32 vni,
-			    __u32 ifindex, __u8 ndm_flags)
+			    __u32 ifindex, __u16 ndm_flags,
+			    bool swdev_notify)
 {
-	__u8 fdb_flags = (ndm_flags & ~NTF_USE);
+	__u16 fdb_flags = (ndm_flags & ~NTF_USE);
 	struct vxlan_rdst *rd = NULL;
 	struct vxlan_fdb *f;
 	int notify = 0;
@@ -771,16 +780,24 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
 				   "lost race to create %pM\n", mac);
 			return -EEXIST;
 		}
-		if (f->state != state) {
-			f->state = state;
-			f->updated = jiffies;
-			notify = 1;
+
+		/* Do not allow an externally learned entry to take over an
+		 * entry added by the user.
+		 */
+		if (!(fdb_flags & NTF_EXT_LEARNED) ||
+		    !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+			if (f->state != state) {
+				f->state = state;
+				f->updated = jiffies;
+				notify = 1;
+			}
+			if (f->flags != fdb_flags) {
+				f->flags = fdb_flags;
+				f->updated = jiffies;
+				notify = 1;
+			}
 		}
-		if (f->flags != fdb_flags) {
-			f->flags = fdb_flags;
-			f->updated = jiffies;
-			notify = 1;
-		}
+
 		if ((flags & NLM_F_REPLACE)) {
 			/* Only change unicasts */
 			if (!(is_multicast_ether_addr(f->eth_addr) ||
@@ -822,7 +839,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
 	if (notify) {
 		if (rd == NULL)
 			rd = first_remote_rtnl(f);
-		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
 	}
 
 	return 0;
@@ -841,7 +858,7 @@ static void vxlan_fdb_free(struct rcu_head *head)
 }
 
 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
-			      bool do_notify)
+			      bool do_notify, bool swdev_notify)
 {
 	struct vxlan_rdst *rd;
 
@@ -851,7 +868,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
 	--vxlan->addrcnt;
 	if (do_notify)
 		list_for_each_entry(rd, &f->remotes, list)
-			vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+			vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+					 swdev_notify);
 
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, vxlan_fdb_free);
@@ -866,10 +884,10 @@ static void vxlan_dst_free(struct rcu_head *head)
 }
 
 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
-				  struct vxlan_rdst *rd)
+				  struct vxlan_rdst *rd, bool swdev_notify)
 {
 	list_del_rcu(&rd->list);
-	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
 	call_rcu(&rd->rcu, vxlan_dst_free);
 }
 
@@ -968,7 +986,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 	spin_lock_bh(&vxlan->hash_lock);
 	err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
-			       port, src_vni, vni, ifindex, ndm->ndm_flags);
+			       port, src_vni, vni, ifindex,
+			       ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
+			       true);
 	spin_unlock_bh(&vxlan->hash_lock);
 
 	return err;
@@ -977,7 +997,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
 			      const unsigned char *addr, union vxlan_addr ip,
 			      __be16 port, __be32 src_vni, __be32 vni,
-			      u32 ifindex, u16 vid)
+			      u32 ifindex, bool swdev_notify)
 {
 	struct vxlan_fdb *f;
 	struct vxlan_rdst *rd = NULL;
@@ -997,11 +1017,11 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
 	 * otherwise destroy the fdb entry
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
-		vxlan_fdb_dst_destroy(vxlan, f, rd);
+		vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
 		goto out;
 	}
 
-	vxlan_fdb_destroy(vxlan, f, true);
+	vxlan_fdb_destroy(vxlan, f, true, swdev_notify);
 
 out:
 	return 0;
@@ -1025,7 +1045,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
 	spin_lock_bh(&vxlan->hash_lock);
 	err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
-				 vid);
+				 true);
 	spin_unlock_bh(&vxlan->hash_lock);
 
 	return err;
@@ -1103,7 +1123,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
 		rdst->remote_ip = *src_ip;
 		f->updated = jiffies;
-		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
 	} else {
 		/* learned new entry */
 		spin_lock(&vxlan->hash_lock);
@@ -1116,7 +1136,7 @@ static bool vxlan_snoop(struct net_device *dev,
 					 vxlan->cfg.dst_port,
 					 vni,
 					 vxlan->default_dst.remote_vni,
-					 ifindex, NTF_SELF);
+					 ifindex, NTF_SELF, true);
 		spin_unlock(&vxlan->hash_lock);
 	}
 
@@ -1552,6 +1572,34 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
+static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+	struct vxlan_dev *vxlan;
+	struct vxlan_sock *vs;
+	struct vxlanhdr *hdr;
+	__be32 vni;
+
+	if (skb->len < VXLAN_HLEN)
+		return -EINVAL;
+
+	hdr = vxlan_hdr(skb);
+
+	if (!(hdr->vx_flags & VXLAN_HF_VNI))
+		return -EINVAL;
+
+	vs = rcu_dereference_sk_user_data(sk);
+	if (!vs)
+		return -ENOENT;
+
+	vni = vxlan_vni(hdr->vx_vni);
+	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+	if (!vxlan)
+		return -ENOENT;
+
+	return 0;
+}
+
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2250,13 +2298,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			goto tx_error;
 		}
 
-		/* Bypass encapsulation if the destination is local */
 		if (!info) {
+			/* Bypass encapsulation if the destination is local */
 			err = encap_bypass_if_local(skb, dev, vxlan, dst,
 						    dst_port, ifindex, vni,
 						    &rt->dst, rt->rt_flags);
 			if (err)
 				goto out_unlock;
+
+			if (vxlan->cfg.df == VXLAN_DF_SET) {
+				df = htons(IP_DF);
+			} else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
+				struct ethhdr *eth = eth_hdr(skb);
+
+				if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
+				    (ntohs(eth->h_proto) == ETH_P_IP &&
+				     old_iph->frag_off & htons(IP_DF)))
+					df = htons(IP_DF);
+			}
 		} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
 			df = htons(IP_DF);
 		}
@@ -2461,7 +2520,7 @@ static void vxlan_cleanup(struct timer_list *t)
 					   "garbage collect %pM\n",
 					   f->eth_addr);
 				f->state = NUD_STALE;
-				vxlan_fdb_destroy(vxlan, f, true);
+				vxlan_fdb_destroy(vxlan, f, true, true);
 			} else if (time_before(timeout, next_timer))
 				next_timer = timeout;
 		}
@@ -2512,7 +2571,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
 	spin_lock_bh(&vxlan->hash_lock);
 	f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
 	if (f)
-		vxlan_fdb_destroy(vxlan, f, true);
+		vxlan_fdb_destroy(vxlan, f, true, true);
 	spin_unlock_bh(&vxlan->hash_lock);
 }
 
@@ -2566,7 +2625,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 				continue;
 			/* the all_zeros_mac entry is deleted at vxlan_uninit */
 			if (!is_zero_ether_addr(f->eth_addr))
-				vxlan_fdb_destroy(vxlan, f, true);
+				vxlan_fdb_destroy(vxlan, f, true, true);
 		}
 	}
 	spin_unlock_bh(&vxlan->hash_lock);
@@ -2809,6 +2868,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
 	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
 	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
 	[IFLA_VXLAN_TTL_INHERIT]	= { .type = NLA_FLAG },
+	[IFLA_VXLAN_DF]		= { .type = NLA_U8 },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -2865,6 +2925,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
 		}
 	}
 
+	if (data[IFLA_VXLAN_DF]) {
+		enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
+		if (df < 0 || df > VXLAN_DF_MAX) {
+			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
+					    "Invalid DF attribute");
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -2948,6 +3018,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 	tunnel_cfg.sk_user_data = vs;
 	tunnel_cfg.encap_type = 1;
 	tunnel_cfg.encap_rcv = vxlan_rcv;
+	tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
 	tunnel_cfg.encap_destroy = NULL;
 	tunnel_cfg.gro_receive = vxlan_gro_receive;
 	tunnel_cfg.gro_complete = vxlan_gro_complete;
@@ -3292,13 +3363,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
 	/* notify default fdb entry */
 	if (f)
-		vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+				 true);
 
 	list_add(&vxlan->next, &vn->vxlan_list);
 	return 0;
 errout:
 	if (f)
-		vxlan_fdb_destroy(vxlan, f, false);
+		vxlan_fdb_destroy(vxlan, f, false, false);
 	return err;
 }
 
@@ -3386,11 +3458,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 		conf->flags |= VXLAN_F_LEARN;
 	}
 
-	if (data[IFLA_VXLAN_AGEING]) {
-		if (changelink)
-			return -EOPNOTSUPP;
+	if (data[IFLA_VXLAN_AGEING])
 		conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
-	}
 
 	if (data[IFLA_VXLAN_PROXY]) {
 		if (changelink)
@@ -3509,6 +3578,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 		conf->mtu = nla_get_u32(tb[IFLA_MTU]);
 	}
 
+	if (data[IFLA_VXLAN_DF])
+		conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
 	return 0;
 }
 
@@ -3532,6 +3604,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
+	unsigned long old_age_interval;
 	struct vxlan_rdst old_dst;
 	struct vxlan_config conf;
 	struct vxlan_fdb *f = NULL;
@@ -3542,12 +3615,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (err)
 		return err;
 
+	old_age_interval = vxlan->cfg.age_interval;
 	memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
 
 	err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
 	if (err)
 		return err;
 
+	if (old_age_interval != vxlan->cfg.age_interval)
+		mod_timer(&vxlan->age_timer, jiffies);
+
 	/* handle default dst entry */
 	if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
 		spin_lock_bh(&vxlan->hash_lock);
@@ -3557,7 +3634,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 					   vxlan->cfg.dst_port,
 					   old_dst.remote_vni,
 					   old_dst.remote_vni,
-					   old_dst.remote_ifindex, 0);
+					   old_dst.remote_ifindex,
+					   true);
 
 		if (!vxlan_addr_any(&dst->remote_ip)) {
 			err = vxlan_fdb_create(vxlan, all_zeros_mac,
@@ -3572,7 +3650,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 				spin_unlock_bh(&vxlan->hash_lock);
 				return err;
 			}
-			vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+			vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+					 RTM_NEWNEIGH, true);
 		}
 		spin_unlock_bh(&vxlan->hash_lock);
 	}
@@ -3601,6 +3680,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL_INHERIT */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_DF */
 		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
@@ -3667,6 +3747,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
 		       !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+	    nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
 	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
 	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
 			!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
@@ -3749,7 +3830,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
 	memset(&tb, 0, sizeof(tb));
 
 	dev = rtnl_create_link(net, name, name_assign_type,
-			       &vxlan_link_ops, tb);
+			       &vxlan_link_ops, tb, NULL);
 	if (IS_ERR(dev))
 		return dev;
 
@@ -3844,18 +3925,89 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
 	spin_unlock_bh(&vxlan->hash_lock);
 }
 
+static int
+vxlan_fdb_external_learn_add(struct net_device *dev,
+			     struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	spin_lock_bh(&vxlan->hash_lock);
+	err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
+			       NUD_REACHABLE,
+			       NLM_F_CREATE | NLM_F_REPLACE,
+			       fdb_info->remote_port,
+			       fdb_info->vni,
+			       fdb_info->remote_vni,
+			       fdb_info->remote_ifindex,
+			       NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
+			       false);
+	spin_unlock_bh(&vxlan->hash_lock);
+
+	return err;
+}
+
+static int
+vxlan_fdb_external_learn_del(struct net_device *dev,
+			     struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_fdb *f;
+	int err = 0;
+
+	spin_lock_bh(&vxlan->hash_lock);
+
+	f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+	if (!f)
+		err = -ENOENT;
+	else if (f->flags & NTF_EXT_LEARNED)
+		err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
+					 fdb_info->remote_ip,
+					 fdb_info->remote_port,
+					 fdb_info->vni,
+					 fdb_info->remote_vni,
+					 fdb_info->remote_ifindex,
+					 false);
+
+	spin_unlock_bh(&vxlan->hash_lock);
+
+	return err;
+}
+
 static int vxlan_switchdev_event(struct notifier_block *unused,
 				 unsigned long event, void *ptr)
 {
 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+	struct switchdev_notifier_vxlan_fdb_info *fdb_info;
+	int err = 0;
 
 	switch (event) {
 	case SWITCHDEV_VXLAN_FDB_OFFLOADED:
 		vxlan_fdb_offloaded_set(dev, ptr);
 		break;
+	case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
+		fdb_info = ptr;
+		err = vxlan_fdb_external_learn_add(dev, fdb_info);
+		if (err) {
+			err = notifier_from_errno(err);
+			break;
+		}
+		fdb_info->offloaded = true;
+		vxlan_fdb_offloaded_set(dev, fdb_info);
+		break;
+	case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
+		fdb_info = ptr;
+		err = vxlan_fdb_external_learn_del(dev, fdb_info);
+		if (err) {
+			err = notifier_from_errno(err);
+			break;
+		}
+		fdb_info->offloaded = false;
+		vxlan_fdb_offloaded_set(dev, fdb_info);
+		break;
 	}
 
-	return 0;
+	return err;
 }
 
 static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
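
The precedence rule added to vxlan_fdb_update() above can be restated as a single
predicate: an externally learned update may modify an entry only if that entry was
not installed by the user. A hypothetical helper capturing just that check (not
code from this patch):

static bool example_fdb_may_update(u16 new_ndm_flags, u16 old_fdb_flags)
{
	return !(new_ndm_flags & NTF_EXT_LEARNED) ||
	       !(old_fdb_flags & NTF_VXLAN_ADDED_BY_USER);
}
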
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 4d64096..7a42336 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
+	netdev_sent_queue(dev, skb->len);
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Start from the next BD that should be filled */
@@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 {
 	/* Start from the next BD that should be filled */
 	struct net_device *dev = priv->ndev;
+	unsigned int bytes_sent = 0;
+	int howmany = 0;
 	struct qe_bd *bd;		/* BD pointer */
 	u16 bd_status;
 	int tx_restart = 0;
@@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 		skb = priv->tx_skbuff[priv->skb_dirtytx];
 		if (!skb)
 			break;
+		howmany++;
+		bytes_sent += skb->len;
 		dev->stats.tx_packets++;
 		memset(priv->tx_buffer +
 		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 	if (tx_restart)
 		hdlc_tx_restart(priv);
 
+	netdev_completed_queue(dev, howmany, bytes_sent);
 	return 0;
 }
 
@@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev)
 		priv->hdlc_busy = 1;
 		netif_device_attach(priv->ndev);
 		napi_enable(&priv->napi);
+		netdev_reset_queue(dev);
 		netif_start_queue(dev);
 		hdlc_open(dev);
 	}
@@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev)
 
 	free_irq(priv->ut_info->uf_info.irq, priv);
 	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
 	priv->hdlc_busy = 0;
 
 	return 0;
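
The fsl_ucc_hdlc hunks above wire the driver into byte queue limits:
netdev_sent_queue() on transmit, netdev_completed_queue() on reclaim, and
netdev_reset_queue() whenever the queue state is discarded. A sketch of the
completion side with a made-up reclaim helper (an assumption, not this driver's
code):

static void foo_tx_clean(struct net_device *dev, struct foo_priv *priv)
{
	unsigned int pkts = 0, bytes = 0;
	struct sk_buff *skb;

	/* foo_next_completed_skb() is hypothetical: pop the next skb whose
	 * descriptor the hardware has finished with.
	 */
	while ((skb = foo_next_completed_skb(priv)) != NULL) {
		pkts++;
		bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	netdev_completed_queue(dev, pkts, bytes);
}
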
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 3e37c8c..b2ad212 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
 	return err;
 }
 
+static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
+			  struct mmc_command *mc, int sg_cnt, int req_sz,
+			  int func_blk_sz, u32 *addr,
+			  struct brcmf_sdio_dev *sdiodev,
+			  struct sdio_func *func, int write)
+{
+	int ret;
+
+	md->sg_len = sg_cnt;
+	md->blocks = req_sz / func_blk_sz;
+	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
+	mc->arg |= md->blocks & 0x1FF;	/* block count */
+	/* incrementing addr for function 1 */
+	if (func->num == 1)
+		*addr += req_sz;
+
+	mmc_set_data_timeout(md, func->card);
+	mmc_wait_for_req(func->card->host, mr);
+
+	ret = mc->error ? mc->error : md->error;
+	if (ret == -ENOMEDIUM) {
+		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+	} else if (ret != 0) {
+		brcmf_err("CMD53 sg block %s failed %d\n",
+			  write ? "write" : "read", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
 /**
  * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
@@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
 				 struct sk_buff_head *pktlist)
 {
 	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
-	unsigned int max_req_sz, orig_offset, dst_offset;
-	unsigned short max_seg_cnt, seg_sz;
+	unsigned int max_req_sz, src_offset, dst_offset;
 	unsigned char *pkt_data, *orig_data, *dst_data;
-	struct sk_buff *pkt_next = NULL, *local_pkt_next;
 	struct sk_buff_head local_list, *target_list;
+	struct sk_buff *pkt_next = NULL, *src;
+	unsigned short max_seg_cnt;
 	struct mmc_request mmc_req;
 	struct mmc_command mmc_cmd;
 	struct mmc_data mmc_dat;
@@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
 	max_req_sz = sdiodev->max_request_size;
 	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
 			    target_list->qlen);
-	seg_sz = target_list->qlen;
-	pkt_offset = 0;
-	pkt_next = target_list->next;
 
 	memset(&mmc_req, 0, sizeof(struct mmc_request));
 	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
@@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
 	mmc_req.cmd = &mmc_cmd;
 	mmc_req.data = &mmc_dat;
 
-	while (seg_sz) {
-		req_sz = 0;
-		sg_cnt = 0;
-		sgl = sdiodev->sgtable.sgl;
-		/* prep sg table */
-		while (pkt_next != (struct sk_buff *)target_list) {
+	req_sz = 0;
+	sg_cnt = 0;
+	sgl = sdiodev->sgtable.sgl;
+	skb_queue_walk(target_list, pkt_next) {
+		pkt_offset = 0;
+		while (pkt_offset < pkt_next->len) {
 			pkt_data = pkt_next->data + pkt_offset;
 			sg_data_sz = pkt_next->len - pkt_offset;
 			if (sg_data_sz > sdiodev->max_segment_size)
@@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
 				sg_data_sz = max_req_sz - req_sz;
 
 			sg_set_buf(sgl, pkt_data, sg_data_sz);
-
 			sg_cnt++;
+
 			sgl = sg_next(sgl);
 			req_sz += sg_data_sz;
 			pkt_offset += sg_data_sz;
-			if (pkt_offset == pkt_next->len) {
-				pkt_offset = 0;
-				pkt_next = pkt_next->next;
+			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
+				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+						     sg_cnt, req_sz, func_blk_sz,
+						     &addr, sdiodev, func, write);
+				if (ret)
+					goto exit_queue_walk;
+				req_sz = 0;
+				sg_cnt = 0;
+				sgl = sdiodev->sgtable.sgl;
 			}
-
-			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
-				break;
-		}
-		seg_sz -= sg_cnt;
-
-		if (req_sz % func_blk_sz != 0) {
-			brcmf_err("sg request length %u is not %u aligned\n",
-				  req_sz, func_blk_sz);
-			ret = -ENOTBLK;
-			goto exit;
-		}
-
-		mmc_dat.sg_len = sg_cnt;
-		mmc_dat.blocks = req_sz / func_blk_sz;
-		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
-		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
-		/* incrementing addr for function 1 */
-		if (func->num == 1)
-			addr += req_sz;
-
-		mmc_set_data_timeout(&mmc_dat, func->card);
-		mmc_wait_for_req(func->card->host, &mmc_req);
-
-		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
-		if (ret == -ENOMEDIUM) {
-			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
-			break;
-		} else if (ret != 0) {
-			brcmf_err("CMD53 sg block %s failed %d\n",
-				  write ? "write" : "read", ret);
-			ret = -EIO;
-			break;
 		}
 	}
-
+	if (sg_cnt)
+		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+				     sg_cnt, req_sz, func_blk_sz,
+				     &addr, sdiodev, func, write);
+exit_queue_walk:
 	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
-		local_pkt_next = local_list.next;
-		orig_offset = 0;
+		src = __skb_peek(&local_list);
+		src_offset = 0;
 		skb_queue_walk(pktlist, pkt_next) {
 			dst_offset = 0;
-			do {
-				req_sz = local_pkt_next->len - orig_offset;
-				req_sz = min_t(uint, pkt_next->len - dst_offset,
-					       req_sz);
-				orig_data = local_pkt_next->data + orig_offset;
+
+			/* This is safe because we must have enough SKB data
+			 * in the local list to cover everything in pktlist.
+			 */
+			while (1) {
+				req_sz = pkt_next->len - dst_offset;
+				if (req_sz > src->len - src_offset)
+					req_sz = src->len - src_offset;
+
+				orig_data = src->data + src_offset;
 				dst_data = pkt_next->data + dst_offset;
 				memcpy(dst_data, orig_data, req_sz);
-				orig_offset += req_sz;
-				dst_offset += req_sz;
-				if (orig_offset == local_pkt_next->len) {
-					orig_offset = 0;
-					local_pkt_next = local_pkt_next->next;
+
+				src_offset += req_sz;
+				if (src_offset == src->len) {
+					src_offset = 0;
+					src = skb_peek_next(src, &local_list);
 				}
+				dst_offset += req_sz;
 				if (dst_offset == pkt_next->len)
 					break;
-			} while (!skb_queue_empty(&local_list));
+			}
 		}
 	}
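
The brcmfmac rework above walks the packet list with skb_queue_walk(), factors the
actual CMD53 transfer into mmc_submit_one(), and flushes whenever the request-size
or segment-count limit is hit, plus once more for any remainder after the walk.
Stripped of the MMC and skb details, the control flow is roughly this standalone
sketch (all names made up, and packets are not split across chunks here):

static int submit_chunk(int nents, int bytes)
{
	/* stand-in for mmc_submit_one(): pretend to submit and succeed */
	return 0;
}

static int submit_all(const int *sizes, int n, int max_bytes, int max_cnt)
{
	int cnt = 0, bytes = 0, i, err;

	for (i = 0; i < n; i++) {
		bytes += sizes[i];
		cnt++;
		if (bytes >= max_bytes || cnt >= max_cnt) {
			err = submit_chunk(cnt, bytes);
			if (err)
				return err;
			cnt = 0;
			bytes = 0;
		}
	}
	return cnt ? submit_chunk(cnt, bytes) : 0;
}
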
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602..a8303af 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		return;
 	}
 
-	wmb();		/* barrier so backend seens requests */
-
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->rx_irq);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 2012551..797fab3 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -121,18 +121,20 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode)
 
 long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 {
-	struct ptp_clock_caps caps;
-	struct ptp_clock_request req;
-	struct ptp_sys_offset *sysoff = NULL;
-	struct ptp_sys_offset_precise precise_offset;
-	struct ptp_pin_desc pd;
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
-	struct ptp_clock_info *ops = ptp->info;
-	struct ptp_clock_time *pct;
-	struct timespec64 ts;
+	struct ptp_sys_offset_extended *extoff = NULL;
+	struct ptp_sys_offset_precise precise_offset;
 	struct system_device_crosststamp xtstamp;
-	int enable, err = 0;
+	struct ptp_clock_info *ops = ptp->info;
+	struct ptp_sys_offset *sysoff = NULL;
+	struct ptp_system_timestamp sts;
+	struct ptp_clock_request req;
+	struct ptp_clock_caps caps;
+	struct ptp_clock_time *pct;
 	unsigned int i, pin_index;
+	struct ptp_pin_desc pd;
+	struct timespec64 ts;
+	int enable, err = 0;
 
 	switch (cmd) {
 
@@ -211,6 +213,36 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 			err = -EFAULT;
 		break;
 
+	case PTP_SYS_OFFSET_EXTENDED:
+		if (!ptp->info->gettimex64) {
+			err = -EOPNOTSUPP;
+			break;
+		}
+		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
+		if (IS_ERR(extoff)) {
+			err = PTR_ERR(extoff);
+			extoff = NULL;
+			break;
+		}
+		if (extoff->n_samples > PTP_MAX_SAMPLES) {
+			err = -EINVAL;
+			break;
+		}
+		for (i = 0; i < extoff->n_samples; i++) {
+			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
+			if (err)
+				goto out;
+			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
+			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
+			extoff->ts[i][1].sec = ts.tv_sec;
+			extoff->ts[i][1].nsec = ts.tv_nsec;
+			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
+			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
+		}
+		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
+			err = -EFAULT;
+		break;
+
 	case PTP_SYS_OFFSET:
 		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
 		if (IS_ERR(sysoff)) {
@@ -228,7 +260,12 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 			pct->sec = ts.tv_sec;
 			pct->nsec = ts.tv_nsec;
 			pct++;
-			ptp->info->gettime64(ptp->info, &ts);
+			if (ops->gettimex64)
+				err = ops->gettimex64(ops, &ts, NULL);
+			else
+				err = ops->gettime64(ops, &ts);
+			if (err)
+				goto out;
 			pct->sec = ts.tv_sec;
 			pct->nsec = ts.tv_nsec;
 			pct++;
@@ -281,6 +318,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		break;
 	}
 
+out:
+	kfree(extoff);
 	kfree(sysoff);
 	return err;
 }
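
From user space, the new PTP_SYS_OFFSET_EXTENDED ioctl returns n_samples triplets of (system, PHC, system) readings, which lets a client bound the PHC sample much tighter than PTP_SYS_OFFSET. A minimal sketch, assuming a uapi header that already defines the ioctl and a clock exposed at /dev/ptp0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_extended extoff = { .n_samples = 5 };
		int fd = open("/dev/ptp0", O_RDWR);
		unsigned int i;

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &extoff) < 0)
			return 1;

		/* ts[i][0] and ts[i][2] bracket the PHC reading in ts[i][1] */
		for (i = 0; i < extoff.n_samples; i++)
			printf("sys %lld.%09u  phc %lld.%09u  sys %lld.%09u\n",
			       (long long)extoff.ts[i][0].sec, extoff.ts[i][0].nsec,
			       (long long)extoff.ts[i][1].sec, extoff.ts[i][1].nsec,
			       (long long)extoff.ts[i][2].sec, extoff.ts[i][2].nsec);
		return 0;
	}
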
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 5419a89d..8a81eec 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -117,7 +117,10 @@ static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 	int err;
 
-	err = ptp->info->gettime64(ptp->info, tp);
+	if (ptp->info->gettimex64)
+		err = ptp->info->gettimex64(ptp->info, tp, NULL);
+	else
+		err = ptp->info->gettime64(ptp->info, tp);
 	return err;
 }
 
@@ -249,8 +252,10 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
 					     ptp, ptp->pin_attr_groups,
 					     "ptp%d", ptp->index);
-	if (IS_ERR(ptp->dev))
+	if (IS_ERR(ptp->dev)) {
+		err = PTR_ERR(ptp->dev);
 		goto no_device;
+	}
 
 	/* Register a new PPS source. */
 	if (info->pps) {
@@ -261,6 +266,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 		pps.owner = info->owner;
 		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
 		if (!ptp->pps_source) {
+			err = -EINVAL;
 			pr_err("failed to register pps source\n");
 			goto no_pps;
 		}
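
On the driver side, the gettimex64() callback is expected to bracket the hardware clock read with system timestamps. A hedged sketch of the intended shape, assuming the ptp_read_system_prets()/ptp_read_system_postts() helpers from ptp_clock_kernel.h; struct foo_priv and foo_read_phc() are illustrative:

	#include <linux/ptp_clock_kernel.h>

	struct foo_priv {
		struct ptp_clock_info ptp_info;
		/* device registers, locks, ... */
	};

	u64 foo_read_phc(struct foo_priv *priv);	/* hypothetical register read */

	static int foo_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
	{
		struct foo_priv *priv = container_of(info, struct foo_priv, ptp_info);

		ptp_read_system_prets(sts);	/* fills sts->pre_ts, NULL-safe */
		*ts = ns_to_timespec64(foo_read_phc(priv));
		ptp_read_system_postts(sts);	/* fills sts->post_ts, NULL-safe */

		return 0;
	}
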
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 04e294d..0ee0269 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -314,7 +314,7 @@ struct qeth_hdr_layer3 {
 	__u16 frame_offset;
 	union {
 		/* TX: */
-		u8 ipv6_addr[16];
+		struct in6_addr ipv6_addr;
 		struct ipv4 {
 			u8 res[12];
 			u32 addr;
@@ -665,7 +665,6 @@ struct qeth_card_blkt {
 
 #define QETH_BROADCAST_WITH_ECHO    0x01
 #define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_READ	    0x01
 #define QETH_LAYER2_MAC_REGISTERED  0x02
 struct qeth_card_info {
 	unsigned short unit_addr2;
@@ -775,7 +774,6 @@ struct qeth_switch_info {
 #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 
 struct qeth_card {
-	struct list_head list;
 	enum qeth_card_states state;
 	spinlock_t lock;
 	struct ccwgroup_device *gdev;
@@ -827,11 +825,6 @@ struct qeth_card {
 	struct work_struct close_dev_work;
 };
 
-struct qeth_card_list_struct {
-	struct list_head list;
-	rwlock_t rwlock;
-};
-
 struct qeth_trap_id {
 	__u16 lparnr;
 	char vmname[8];
@@ -978,11 +971,11 @@ int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
 
 /* exports for qeth discipline device drivers */
-extern struct qeth_card_list_struct qeth_core_card_list;
 extern struct kmem_cache *qeth_core_header_cache;
 extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
 
 struct net_device *qeth_clone_netdev(struct net_device *orig);
+struct qeth_card *qeth_get_card_by_busid(char *bus_id);
 void qeth_set_recovery_task(struct qeth_card *);
 void qeth_clear_recovery_task(struct qeth_card *);
 void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -1025,9 +1018,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
-			     struct qeth_hdr *hdr, unsigned int offset,
-			     unsigned int hd_len);
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			struct sk_buff *skb, struct qeth_hdr *hdr,
 			unsigned int offset, unsigned int hd_len,
@@ -1058,11 +1048,6 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
-		       struct qeth_hdr **hdr, unsigned int hdr_len,
-		       unsigned int proto_len, unsigned int *elements);
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
-		       struct sk_buff *skb, unsigned int proto_len);
 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	      struct qeth_qdio_out_q *queue, int ipv, int cast_type,
 	      void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4bce5ae..ae97246 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -54,8 +54,6 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
 };
 EXPORT_SYMBOL_GPL(qeth_dbf);
 
-struct qeth_card_list_struct qeth_core_card_list;
-EXPORT_SYMBOL_GPL(qeth_core_card_list);
 struct kmem_cache *qeth_core_header_cache;
 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
 static struct kmem_cache *qeth_qdio_outbuf_cache;
@@ -2837,6 +2835,17 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	cmd->hdr.prot_version = prot;
 }
 
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+{
+	u8 prot_type = qeth_mpc_select_prot_type(card);
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+
 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
 		enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
 {
@@ -2844,6 +2853,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
 
 	iob = qeth_get_buffer(&card->write);
 	if (iob) {
+		qeth_prepare_ipa_cmd(card, iob);
 		qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
 	} else {
 		dev_warn(&card->gdev->dev,
@@ -2856,17 +2866,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
 }
 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
 
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
-{
-	u8 prot_type = qeth_mpc_select_prot_type(card);
-
-	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
-	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
-	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
-	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
-}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
-
 /**
  * qeth_send_ipa_cmd() - send an IPA command
  *
@@ -2881,7 +2880,6 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 	int rc;
 
 	QETH_CARD_TEXT(card, 4, "sendipa");
-	qeth_prepare_ipa_cmd(card, iob);
 	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
 						iob, reply_cb, reply_param);
 	if (rc == -ETIME) {
@@ -3777,9 +3775,9 @@ EXPORT_SYMBOL_GPL(qeth_count_elements);
  * The number of needed buffer elements is returned in @elements.
  * Error to create the hdr is indicated by returning with < 0.
  */
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
-		       struct qeth_hdr **hdr, unsigned int hdr_len,
-		       unsigned int proto_len, unsigned int *elements)
+static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+			      struct qeth_hdr **hdr, unsigned int hdr_len,
+			      unsigned int proto_len, unsigned int *elements)
 {
 	const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 	const unsigned int contiguous = proto_len ? proto_len : 1;
@@ -3849,7 +3847,6 @@ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
 	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_add_hw_header);
 
 static void __qeth_fill_buffer(struct sk_buff *skb,
 			       struct qeth_qdio_out_buffer *buf,
@@ -3972,9 +3969,9 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 	return flush_cnt;
 }
 
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
-			     struct qeth_hdr *hdr, unsigned int offset,
-			     unsigned int hd_len)
+static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
+				    struct sk_buff *skb, struct qeth_hdr *hdr,
+				    unsigned int offset, unsigned int hd_len)
 {
 	int index = queue->next_buf_to_fill;
 	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
@@ -3990,7 +3987,6 @@ int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
 	qeth_flush_buffers(queue, index, 1);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
 
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			struct sk_buff *skb, struct qeth_hdr *hdr,
@@ -4082,8 +4078,9 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 }
 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
 
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
-		       struct sk_buff *skb, unsigned int proto_len)
+static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
+			      unsigned int payload_len, struct sk_buff *skb,
+			      unsigned int proto_len)
 {
 	struct qeth_hdr_ext_tso *ext = &hdr->ext;
 
@@ -4096,7 +4093,6 @@ void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
 	ext->mss = skb_shinfo(skb)->gso_size;
 	ext->dg_hdr_len = proto_len;
 }
-EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
 
 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	      struct qeth_qdio_out_q *queue, int ipv, int cast_type,
@@ -4119,7 +4115,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	} else {
 		hw_hdr_len = sizeof(struct qeth_hdr);
-		proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
 	}
 
 	rc = skb_cow_head(skb, hw_hdr_len);
@@ -4235,16 +4231,18 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_ipacmd_setadpparms *adp_cmd;
 
 	QETH_CARD_TEXT(card, 4, "chgmaccb");
 	if (qeth_setadpparms_inspect_rc(cmd))
 		return 0;
 
-	if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
-		ether_addr_copy(card->dev->dev_addr,
-				cmd->data.setadapterparms.data.change_addr.addr);
-		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
-	}
+	adp_cmd = &cmd->data.setadapterparms;
+	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
+	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
+		return 0;
+
+	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
 	return 0;
 }
 
@@ -4499,9 +4497,6 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
 
 	QETH_CARD_TEXT(card, 4, "sendsnmp");
 
-	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
-	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
-	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
 	/* adjust PDU length fields in IPA_PDU_HEADER */
 	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
 	s2 = (u32) len;
@@ -5480,34 +5475,11 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 }
 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
 
-static int qeth_send_setassparms(struct qeth_card *card,
-				 struct qeth_cmd_buffer *iob, u16 len,
-				 long data, int (*reply_cb)(struct qeth_card *,
-							    struct qeth_reply *,
-							    unsigned long),
-				 void *reply_param)
-{
-	int rc;
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 4, "sendassp");
-
-	cmd = __ipa_cmd(iob);
-	if (len <= sizeof(__u32))
-		cmd->data.setassparms.data.flags_32bit = (__u32) data;
-	else   /* (len > sizeof(__u32)) */
-		memcpy(&cmd->data.setassparms.data, (void *) data, len);
-
-	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
-	return rc;
-}
-
 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
 				      enum qeth_ipa_funcs ipa_func,
 				      u16 cmd_code, long data,
 				      enum qeth_prot_versions prot)
 {
-	int rc;
 	int length = 0;
 	struct qeth_cmd_buffer *iob;
 
@@ -5517,9 +5489,9 @@ int qeth_send_simple_setassparms_prot(struct qeth_card *card,
 	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
 	if (!iob)
 		return -ENOMEM;
-	rc = qeth_send_setassparms(card, iob, length, data,
-				   qeth_setassparms_cb, NULL);
-	return rc;
+
+	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
 }
 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
 
@@ -5806,9 +5778,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		break;
 	}
 
-	write_lock_irq(&qeth_core_card_list.rwlock);
-	list_add_tail(&card->list, &qeth_core_card_list.list);
-	write_unlock_irq(&qeth_core_card_list.rwlock);
 	return 0;
 
 err_disc:
@@ -5833,9 +5802,6 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 		qeth_core_free_discipline(card);
 	}
 
-	write_lock_irq(&qeth_core_card_list.rwlock);
-	list_del(&card->list);
-	write_unlock_irq(&qeth_core_card_list.rwlock);
 	free_netdev(card->dev);
 	qeth_core_free_card(card);
 	put_device(&gdev->dev);
@@ -5950,6 +5916,21 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
 	.restore = qeth_core_restore,
 };
 
+struct qeth_card *qeth_get_card_by_busid(char *bus_id)
+{
+	struct ccwgroup_device *gdev;
+	struct qeth_card *card;
+
+	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
+	if (!gdev)
+		return NULL;
+
+	card = dev_get_drvdata(&gdev->dev);
+	put_device(&gdev->dev);
+	return card;
+}
+EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
+
 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -6381,16 +6362,16 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
 				     enum qeth_prot_versions prot)
 {
 	struct qeth_cmd_buffer *iob;
-	int rc = -ENOMEM;
 
 	QETH_CARD_TEXT(card, 4, "chkdocmd");
 	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
 				       sizeof(__u32), prot);
-	if (iob)
-		rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
-					   qeth_ipa_checksum_run_cmd_cb,
-					   chksum_cb);
-	return rc;
+	if (!iob)
+		return -ENOMEM;
+
+	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+	return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
+				 chksum_cb);
 }
 
 static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
@@ -6488,8 +6469,7 @@ static int qeth_set_tso_on(struct qeth_card *card,
 	if (!iob)
 		return -ENOMEM;
 
-	rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
-				   qeth_start_tso_cb, &tso_data);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
 	if (rc)
 		return rc;
 
@@ -6506,10 +6486,9 @@ static int qeth_set_tso_on(struct qeth_card *card,
 	}
 
 	/* enable TSO capability */
-	caps.supported = 0;
-	caps.enabled = QETH_IPA_LARGE_SEND_TCP;
-	rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
-				   qeth_setassparms_get_caps_cb, &caps);
+	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
+		QETH_IPA_LARGE_SEND_TCP;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
 	if (rc) {
 		qeth_set_tso_off(card, prot);
 		return rc;
@@ -6688,8 +6667,6 @@ static int __init qeth_core_init(void)
 	int rc;
 
 	pr_info("loading core functions\n");
-	INIT_LIST_HEAD(&qeth_core_card_list.list);
-	rwlock_init(&qeth_core_card_list.rwlock);
 
 	qeth_wq = create_singlethread_workqueue("qeth_wq");
 	if (!qeth_wq) {
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index e891c0b..16fc51a 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -144,7 +144,6 @@ unsigned char IPA_PDU_HEADER[] = {
 		sizeof(struct qeth_ipa_cmd) % 256,
 	0x00, 0x00, 0x00, 0x40,
 };
-EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
 
 struct ipa_rc_msg {
 	enum qeth_ipa_return_codes rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3e54be2..1ab3219 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -80,7 +80,9 @@ enum qeth_card_types {
 };
 
 #define IS_IQD(card)	((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSD(card)	((card)->info.type == QETH_CARD_TYPE_OSD)
 #define IS_OSN(card)	((card)->info.type == QETH_CARD_TYPE_OSN)
+#define IS_VM_NIC(card)	((card)->info.guestlan)
 
 #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
 /* only the first two bytes are looked at in qeth_get_cardname_short */
@@ -529,17 +531,20 @@ struct qeth_query_switch_attributes {
 	__u8  reserved3[8];
 };
 
+#define QETH_SETADP_FLAGS_VIRTUAL_MAC	0x80	/* for CHANGE_ADDR_READ_MAC */
+
 struct qeth_ipacmd_setadpparms_hdr {
-	__u32 supp_hw_cmds;
-	__u32 reserved1;
-	__u16 cmdlength;
-	__u16 reserved2;
-	__u32 command_code;
-	__u16 return_code;
-	__u8  used_total;
-	__u8  seq_no;
-	__u32 reserved3;
-} __attribute__ ((packed));
+	u32 supp_hw_cmds;
+	u32 reserved1;
+	u16 cmdlength;
+	u16 reserved2;
+	u32 command_code;
+	u16 return_code;
+	u8 used_total;
+	u8 seq_no;
+	u8 flags;
+	u8 reserved3[3];
+};
 
 struct qeth_ipacmd_setadpparms {
 	struct qeth_ipacmd_setadpparms_hdr hdr;
@@ -828,10 +833,9 @@ enum qeth_ipa_arp_return_codes {
 extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
 extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 
-#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
-			       sizeof(struct qeth_ipacmd_setassparms_hdr))
-#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
-				       QETH_SETASS_BASE_LEN)
+#define QETH_SETASS_BASE_LEN (IPA_PDU_HEADER_SIZE + \
+			      sizeof(struct qeth_ipacmd_hdr) + \
+			      sizeof(struct qeth_ipacmd_setassparms_hdr))
 #define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
 			      sizeof(struct qeth_ipacmd_setadpparms_hdr))
 #define QETH_SNMP_SETADP_CMDLENGTH 16
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2914a1a..2836231 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -36,28 +36,6 @@ static void qeth_l2_vnicc_init(struct qeth_card *card);
 static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
 					  u32 *timeout);
 
-static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
-{
-	struct qeth_card *card;
-	struct net_device *ndev;
-	__u16 temp_dev_no;
-	unsigned long flags;
-	struct ccw_dev_id read_devid;
-
-	ndev = NULL;
-	memcpy(&temp_dev_no, read_dev_no, 2);
-	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
-	list_for_each_entry(card, &qeth_core_card_list.list, list) {
-		ccw_device_get_id(CARD_RDEV(card), &read_devid);
-		if (read_devid.devno == temp_dev_no) {
-			ndev = card->dev;
-			break;
-		}
-	}
-	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
-	return ndev;
-}
-
 static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
 {
 	int rc;
@@ -461,12 +439,9 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
 		/* fall back to alternative mechanism: */
 	}
 
-	if (card->info.type == QETH_CARD_TYPE_IQD ||
-	    card->info.type == QETH_CARD_TYPE_OSM ||
-	    card->info.type == QETH_CARD_TYPE_OSX ||
-	    card->info.guestlan) {
+	if (!IS_OSN(card)) {
 		rc = qeth_setadpparms_change_macaddr(card);
-		if (!rc)
+		if (!rc && is_valid_ether_addr(card->dev->dev_addr))
 			goto out;
 		QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
 				 CARD_DEVID(card), rc);
@@ -917,7 +892,8 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
 				       PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
 	}
 
-	qeth_l2_request_initial_mac(card);
+	if (!is_valid_ether_addr(card->dev->dev_addr))
+		qeth_l2_request_initial_mac(card);
 	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
 	rc = register_netdev(card->dev);
 	if (!rc && carrier_ok)
@@ -1288,13 +1264,16 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
 		  int (*data_cb)(struct sk_buff *))
 {
 	struct qeth_card *card;
+	char bus_id[16];
+	u16 devno;
 
-	*dev = qeth_l2_netdev_by_devno(read_dev_no);
-	if (*dev == NULL)
+	memcpy(&devno, read_dev_no, 2);
+	sprintf(bus_id, "0.0.%04x", devno);
+	card = qeth_get_card_by_busid(bus_id);
+	if (!card || !IS_OSN(card))
 		return -ENODEV;
-	card = (*dev)->ml_priv;
-	if (!card)
-		return -ENODEV;
+	*dev = card->dev;
+
 	QETH_CARD_TEXT(card, 2, "osnreg");
 	if ((assist_cb == NULL) || (data_cb == NULL))
 		return -EINVAL;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f08b745..eca68da3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -949,9 +949,6 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 	if (cmd->hdr.return_code == 0)
 		ether_addr_copy(card->dev->dev_addr,
 				cmd->data.create_destroy_addr.unique_id);
-	else
-		eth_random_addr(card->dev->dev_addr);
-
 	return 0;
 }
 
@@ -1685,21 +1682,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
 	return 0;
 }
 
-static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
-		struct qeth_cmd_buffer *iob, int len,
-		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
-			unsigned long),
-		void *reply_param)
-{
-	QETH_CARD_TEXT(card, 4, "sendarp");
-
-	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
-	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
-	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
-	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
-				      reply_cb, reply_param);
-}
-
 static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
 	enum qeth_prot_versions prot,
 	struct qeth_arp_query_info *qinfo)
@@ -1719,11 +1701,9 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
 		return -ENOMEM;
 	cmd = __ipa_cmd(iob);
 	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
-	cmd->data.setassparms.data.query_arp.reply_bits = 0;
-	cmd->data.setassparms.data.query_arp.no_entries = 0;
-	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
-			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
-			   qeth_l3_arp_query_cb, (void *)qinfo);
+	rc = qeth_send_control_data(card,
+				    QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN,
+				    iob, qeth_l3_arp_query_cb, qinfo);
 	if (rc)
 		QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
 				 CARD_DEVID(card), rc);
@@ -1929,22 +1909,6 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
 	}
 }
 
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
-				     unsigned int data_len)
-{
-	char daddr[16];
-
-	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-	hdr->hdr.l3.length = data_len;
-	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
-
-	memset(daddr, 0, sizeof(daddr));
-	daddr[0] = 0xfe;
-	daddr[1] = 0x80;
-	memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8);
-	memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
-}
-
 static u8 qeth_l3_cast_type_to_flag(int cast_type)
 {
 	if (cast_type == RTN_MULTICAST)
@@ -1960,6 +1924,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 				struct sk_buff *skb, int ipv, int cast_type,
 				unsigned int data_len)
 {
+	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
 	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
 
 	hdr->hdr.l3.length = data_len;
@@ -1968,6 +1933,15 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
 	} else {
 		hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+
+		if (skb->protocol == htons(ETH_P_AF_IUCV)) {
+			l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+			l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
+			memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+			       iucv_trans_hdr(skb)->destUserID, 8);
+			return;
+		}
+
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
 			/* some HW requires combined L3+L4 csum offload: */
@@ -2012,13 +1986,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 	} else {
 		/* IPv6 */
 		const struct rt6_info *rt = skb_rt6_info(skb);
-		const struct in6_addr *next_hop;
 
 		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-			next_hop = &rt->rt6i_gateway;
+			l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
 		else
-			next_hop = &ipv6_hdr(skb)->daddr;
-		memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
+			l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
 
 		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
 		if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -2044,84 +2016,25 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-	unsigned int hw_hdr_len, proto_len, frame_len, elements;
 	unsigned char eth_hdr[ETH_HLEN];
-	bool is_tso = skb_is_gso(skb);
-	unsigned int data_offset = 0;
-	struct qeth_hdr *hdr = NULL;
-	unsigned int hd_len = 0;
-	int push_len, rc;
-	bool is_sg;
-
-	if (is_tso) {
-		hw_hdr_len = sizeof(struct qeth_hdr_tso);
-		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
-			    ETH_HLEN;
-	} else {
-		hw_hdr_len = sizeof(struct qeth_hdr);
-		proto_len = 0;
-	}
+	unsigned int hw_hdr_len;
+	int rc;
 
 	/* re-use the L2 header area for the HW header: */
+	hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
+				       sizeof(struct qeth_hdr);
 	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
 	if (rc)
 		return rc;
 	skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
 	skb_pull(skb, ETH_HLEN);
-	frame_len = skb->len;
 
 	qeth_l3_fixup_headers(skb);
-	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
-				      &elements);
-	if (push_len < 0)
-		return push_len;
-	if (is_tso || !push_len) {
-		/* HW header needs its own buffer element. */
-		hd_len = hw_hdr_len + proto_len;
-		data_offset = push_len + proto_len;
-	}
-	memset(hdr, 0, hw_hdr_len);
-
-	if (skb->protocol == htons(ETH_P_AF_IUCV)) {
-		qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
-	} else {
-		qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
-		if (is_tso)
-			qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
-					  frame_len - proto_len, skb,
-					  proto_len);
-	}
-
-	is_sg = skb_is_nonlinear(skb);
-	if (IS_IQD(card)) {
-		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
-					      hd_len);
-	} else {
-		/* TODO: drop skb_orphan() once TX completion is fast enough */
-		skb_orphan(skb);
-		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
-					 hd_len, elements);
-	}
-
-	if (!rc) {
-		if (card->options.performance_stats) {
-			card->perf_stats.buf_elements_sent += elements;
-			if (is_sg)
-				card->perf_stats.sg_skbs_sent++;
-			if (is_tso) {
-				card->perf_stats.large_send_bytes += frame_len;
-				card->perf_stats.large_send_cnt++;
-			}
-		}
-	} else {
-		if (!push_len)
-			kmem_cache_free(qeth_core_header_cache, hdr);
-		if (rc == -EBUSY) {
-			/* roll back to ETH header */
-			skb_pull(skb, push_len);
-			skb_push(skb, ETH_HLEN);
-			skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
-		}
+	rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
+	if (rc == -EBUSY) {
+		/* roll back to ETH header */
+		skb_push(skb, ETH_HLEN);
+		skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
 	}
 	return rc;
 }
@@ -2366,9 +2279,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
 		rc = qeth_l3_iqd_read_initial_mac(card);
 		if (rc)
 			goto out;
-
-		if (card->options.hsuid[0])
-			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
 	} else
 		return -ENODEV;
 
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 064ef57..907dd87 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1767,8 +1767,7 @@ static int init_act_open(struct cxgbi_sock *csk)
 		csk->mtu = dst_mtu(csk->dst);
 	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
 	csk->tx_chan = cxgb4_port_chan(ndev);
-	csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
-					 cxgb4_port_viid(ndev));
+	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
 	step = lldi->ntxq / lldi->nchan;
 	csk->txq_idx = cxgb4_port_idx(ndev) * step;
 	step = lldi->nrxq / lldi->nchan;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 5ce2471..52c153c 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -36,6 +36,8 @@
 #define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
 #define QMAN_POLL_LIMIT 32
 #define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_DQRR_IT_MAX 15
+#define QMAN_ITP_MAX 0xFFF
 #define QMAN_PIRQ_MR_ITHRESH 4
 #define QMAN_PIRQ_IPERIOD 100
 
@@ -727,9 +729,15 @@ static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
 	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
 }
 
-static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
 {
+
+	if (ithresh > QMAN_DQRR_IT_MAX)
+		return -EINVAL;
+
 	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+
+	return 0;
 }
 
 /* --- MR API --- */
@@ -1012,20 +1020,27 @@ static inline void put_affine_portal(void)
 
 static struct workqueue_struct *qm_portal_wq;
 
-void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
 {
-	if (!portal)
-		return;
+	int res;
 
-	qm_dqrr_set_ithresh(&portal->p, ithresh);
+	if (!portal)
+		return -EINVAL;
+
+	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
+	if (res)
+		return res;
+
 	portal->p.dqrr.ithresh = ithresh;
+
+	return 0;
 }
 EXPORT_SYMBOL(qman_dqrr_set_ithresh);
 
 void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
 {
 	if (portal && ithresh)
-		*ithresh = portal->p.dqrr.ithresh;
+		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
 }
 EXPORT_SYMBOL(qman_dqrr_get_ithresh);
 
@@ -1036,10 +1051,14 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
 }
 EXPORT_SYMBOL(qman_portal_get_iperiod);
 
-void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
 {
-	if (portal)
-		qm_out(&portal->p, QM_REG_ITPR, iperiod);
+	if (!portal || iperiod > QMAN_ITP_MAX)
+		return -EINVAL;
+
+	qm_out(&portal->p, QM_REG_ITPR, iperiod);
+
+	return 0;
 }
 EXPORT_SYMBOL(qman_portal_set_iperiod);
 
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 7a7ca67..4fa37d6 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -719,9 +719,6 @@ static int port_vlans_add(struct net_device *netdev,
 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
 	int vid, err = 0;
 
-	if (netif_is_bridge_master(vlan->obj.orig_dev))
-		return -EOPNOTSUPP;
-
 	if (switchdev_trans_ph_prepare(trans))
 		return 0;
 
@@ -930,8 +927,6 @@ static int swdev_port_obj_del(struct net_device *netdev,
 static const struct switchdev_ops ethsw_port_switchdev_ops = {
 	.switchdev_port_attr_get	= swdev_port_attr_get,
 	.switchdev_port_attr_set	= swdev_port_attr_set,
-	.switchdev_port_obj_add		= swdev_port_obj_add,
-	.switchdev_port_obj_del		= swdev_port_obj_del,
 };
 
 /* For the moment, only flood setting needs to be updated */
@@ -972,6 +967,11 @@ static int port_bridge_leave(struct net_device *netdev)
 	return err;
 }
 
+static bool ethsw_port_dev_check(const struct net_device *netdev)
+{
+	return netdev->netdev_ops == &ethsw_port_ops;
+}
+
 static int port_netdevice_event(struct notifier_block *unused,
 				unsigned long event, void *ptr)
 {
@@ -980,7 +980,7 @@ static int port_netdevice_event(struct notifier_block *unused,
 	struct net_device *upper_dev;
 	int err = 0;
 
-	if (netdev->netdev_ops != &ethsw_port_ops)
+	if (!ethsw_port_dev_check(netdev))
 		return NOTIFY_DONE;
 
 	/* Handle just upper dev link/unlink for the moment */
@@ -1083,10 +1083,51 @@ static int port_switchdev_event(struct notifier_block *unused,
 	return NOTIFY_BAD;
 }
 
+static int
+ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+			struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+	int err = -EOPNOTSUPP;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD:
+		err = swdev_port_obj_add(netdev, port_obj_info->obj,
+					 port_obj_info->trans);
+		break;
+	case SWITCHDEV_PORT_OBJ_DEL:
+		err = swdev_port_obj_del(netdev, port_obj_info->obj);
+		break;
+	}
+
+	port_obj_info->handled = true;
+	return notifier_from_errno(err);
+}
+
+static int port_switchdev_blocking_event(struct notifier_block *unused,
+					 unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+	if (!ethsw_port_dev_check(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
+	case SWITCHDEV_PORT_OBJ_DEL:
+		return ethsw_switchdev_port_obj_event(event, dev, ptr);
+	}
+
+	return NOTIFY_DONE;
+}
+
 static struct notifier_block port_switchdev_nb = {
 	.notifier_call = port_switchdev_event,
 };
 
+static struct notifier_block port_switchdev_blocking_nb = {
+	.notifier_call = port_switchdev_blocking_event,
+};
+
 static int ethsw_register_notifier(struct device *dev)
 {
 	int err;
@@ -1103,8 +1144,16 @@ static int ethsw_register_notifier(struct device *dev)
 		goto err_switchdev_nb;
 	}
 
+	err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+	if (err) {
+		dev_err(dev, "Failed to register switchdev blocking notifier\n");
+		goto err_switchdev_blocking_nb;
+	}
+
 	return 0;
 
+err_switchdev_blocking_nb:
+	unregister_switchdev_notifier(&port_switchdev_nb);
 err_switchdev_nb:
 	unregister_netdevice_notifier(&port_nb);
 	return err;
@@ -1291,8 +1340,15 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
 
 static void ethsw_unregister_notifier(struct device *dev)
 {
+	struct notifier_block *nb;
 	int err;
 
+	nb = &port_switchdev_blocking_nb;
+	err = unregister_switchdev_blocking_notifier(nb);
+	if (err)
+		dev_err(dev,
+			"Failed to unregister switchdev blocking notifier (%d)\n", err);
+
 	err = unregister_switchdev_notifier(&port_switchdev_nb);
 	if (err)
 		dev_err(dev,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 71888b9..0376d73 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -932,8 +932,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
 			goto out;
 		csk->mtu = ndev->mtu;
 		csk->tx_chan = cxgb4_port_chan(ndev);
-		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
-						 cxgb4_port_viid(ndev));
+		csk->smac_idx =
+			       ((struct port_info *)netdev_priv(ndev))->smt_idx;
 		step = cdev->lldi.ntxq /
 			cdev->lldi.nchan;
 		csk->txq_idx = cxgb4_port_idx(ndev) * step;
@@ -968,8 +968,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
 		port_id = cxgb4_port_idx(ndev);
 		csk->mtu = dst_mtu(dst);
 		csk->tx_chan = cxgb4_port_chan(ndev);
-		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
-						 cxgb4_port_viid(ndev));
+		csk->smac_idx =
+			       ((struct port_info *)netdev_priv(ndev))->smt_idx;
 		step = cdev->lldi.ntxq /
 			cdev->lldi.nports;
 		csk->txq_idx = (port_id * step) +
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ab11b2b..d919284 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,6 +141,10 @@ struct vhost_net {
 	unsigned tx_zcopy_err;
 	/* Flush in progress. Protected by tx vq lock. */
 	bool tx_flush;
+	/* Private page frag */
+	struct page_frag page_frag;
+	/* Refcount bias of page frag */
+	int refcnt_bias;
 };
 
 static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -637,14 +641,53 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
 	       !vhost_vq_avail_empty(vq->dev, vq);
 }
 
+#define SKB_FRAG_PAGE_ORDER     get_order(32768)
+
+static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
+				       struct page_frag *pfrag, gfp_t gfp)
+{
+	if (pfrag->page) {
+		if (pfrag->offset + sz <= pfrag->size)
+			return true;
+		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
+	}
+
+	pfrag->offset = 0;
+	net->refcnt_bias = 0;
+	if (SKB_FRAG_PAGE_ORDER) {
+		/* Avoid direct reclaim but allow kswapd to wake */
+		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
+					  __GFP_COMP | __GFP_NOWARN |
+					  __GFP_NORETRY,
+					  SKB_FRAG_PAGE_ORDER);
+		if (likely(pfrag->page)) {
+			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
+			goto done;
+		}
+	}
+	pfrag->page = alloc_page(gfp);
+	if (likely(pfrag->page)) {
+		pfrag->size = PAGE_SIZE;
+		goto done;
+	}
+	return false;
+
+done:
+	net->refcnt_bias = USHRT_MAX;
+	page_ref_add(pfrag->page, USHRT_MAX - 1);
+	return true;
+}
+
 #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 
 static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 			       struct iov_iter *from)
 {
 	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
+					     dev);
 	struct socket *sock = vq->private_data;
-	struct page_frag *alloc_frag = &current->task_frag;
+	struct page_frag *alloc_frag = &net->page_frag;
 	struct virtio_net_hdr *gso;
 	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
 	struct tun_xdp_hdr *hdr;
@@ -665,7 +708,8 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 
 	buflen += SKB_DATA_ALIGN(len + pad);
 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
-	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
+	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
+						 alloc_frag, GFP_KERNEL)))
 		return -ENOMEM;
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
@@ -703,7 +747,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	xdp->data_end = xdp->data + len;
 	hdr->buflen = buflen;
 
-	get_page(alloc_frag->page);
+	--net->refcnt_bias;
 	alloc_frag->offset += buflen;
 
 	++nvq->batched_xdp;
@@ -1292,6 +1336,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
 
 	f->private_data = n;
+	n->page_frag.page = NULL;
+	n->refcnt_bias = 0;
 
 	return 0;
 }
@@ -1366,6 +1412,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
 	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
 	kfree(n->dev.vqs);
+	if (n->page_frag.page)
+		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
 	kvfree(n);
 	return 0;
 }
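
The private page_frag plus refcnt_bias reproduces the usual page-frag trick: one page_ref_add() pre-pays USHRT_MAX references, each built XDP buffer consumes one by decrementing the bias, and the leftover references are returned in a single __page_frag_cache_drain() at teardown, so the hot path avoids an atomic get_page() per buffer. A condensed sketch of that lifecycle outside vhost, with illustrative names:

	#include <linux/mm.h>
	#include <linux/gfp.h>

	struct foo_frag {
		struct page_frag frag;
		int bias;
	};

	static void foo_frag_recharge(struct foo_frag *f)
	{
		page_ref_add(f->frag.page, USHRT_MAX - 1);	/* pre-pay references */
		f->bias = USHRT_MAX;
	}

	static void *foo_frag_alloc(struct foo_frag *f, unsigned int len)
	{
		void *buf = page_address(f->frag.page) + f->frag.offset;

		f->frag.offset += len;
		f->bias--;		/* hand one pre-paid reference to the buffer */
		return buf;
	}

	static void foo_frag_drain(struct foo_frag *f)
	{
		if (f->frag.page)
			__page_frag_cache_drain(f->frag.page, f->bias);
	}
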
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index b248805..7605b59 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -171,7 +171,7 @@ struct virtchnl_msg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
 
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
 
 /* VIRTCHNL_OP_VERSION
  * VF posts its version number to the PF. PF responds with its version number
@@ -342,6 +342,8 @@ struct virtchnl_vsi_queue_config_info {
 	struct virtchnl_queue_pair_info qpair[1];
 };
 
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
 /* VIRTCHNL_OP_REQUEST_QUEUES
  * VF sends this message to request the PF to allocate additional queues to
  * this VF.  Each VF gets a guaranteed number of queues on init but asking for
@@ -357,8 +359,6 @@ struct virtchnl_vf_res_request {
 	u16 num_queue_pairs;
 };
 
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
-
 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
  * VF uses this message to map vectors to queues.
  * The rxq_map and txq_map fields are bitmaps used to indicate which queues
@@ -819,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 		if (msglen >= valid_len) {
 			struct virtchnl_tc_info *vti =
 				(struct virtchnl_tc_info *)msg;
-			valid_len += vti->num_tc *
-				sizeof(struct virtchnl_channel_info);
+			valid_len += (vti->num_tc - 1) *
+				     sizeof(struct virtchnl_channel_info);
 			if (vti->num_tc == 0)
 				err_msg_format = true;
 		}
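
The corrected length check reflects that struct virtchnl_tc_info already embeds one virtchnl_channel_info, so only the additional (num_tc - 1) entries extend the expected message; an equivalent way to write the arithmetic:

	/* expected length of a num_tc-channel message */
	valid_len = sizeof(struct virtchnl_tc_info) +
		    (vti->num_tc - 1) * sizeof(struct virtchnl_channel_info);
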
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 949e9af..9cd00a3 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -28,6 +28,7 @@
 #define PHY_ID_BCM89610			0x03625cd0
 
 #define PHY_ID_BCM7250			0xae025280
+#define PHY_ID_BCM7255			0xae025120
 #define PHY_ID_BCM7260			0xae025190
 #define PHY_ID_BCM7268			0xae025090
 #define PHY_ID_BCM7271			0xae0253b0
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c20c7e1..ef7c3d3 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -119,6 +119,7 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
 struct net_device *br_fdb_find_port(const struct net_device *br_dev,
 				    const unsigned char *addr,
 				    __u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
 #else
 static inline struct net_device *
 br_fdb_find_port(const struct net_device *br_dev,
@@ -127,6 +128,11 @@ br_fdb_find_port(const struct net_device *br_dev,
 {
 	return NULL;
 }
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+	return false;
+}
 #endif
 
 #endif
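
A hedged example of the new helper from a switchdev driver deciding whether to program flooding for an offloaded bridge port (BR_FLOOD is just one possible flag, and the function name is illustrative):

	#include <linux/if_bridge.h>

	static bool foo_port_should_flood(const struct net_device *brport_dev)
	{
		/* the stub above returns false when bridging is compiled out */
		return br_port_flag_is_set(brport_dev, BR_FLOOD);
	}
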
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 83ea4df..4cca4da 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 
 #define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
 #define VLAN_PRIO_SHIFT		13
-#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
+#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
 #define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
 #define VLAN_N_VID		4096
 
@@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev)
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
-#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
+#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
 #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb)	((__skb)->vlan_tci & VLAN_PRIO_MASK)
+#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
 
 static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
 {
@@ -133,6 +133,9 @@ struct vlan_pcpu_stats {
 
 extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
 					       __be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+			 int (*action)(struct net_device *dev, int vid,
+				       void *arg), void *arg);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
 	return NULL;
 }
 
+static inline int
+vlan_for_each(struct net_device *dev,
+	      int (*action)(struct net_device *dev, int vid, void *arg),
+	      void *arg)
+{
+	return 0;
+}
+
 static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
 	BUG();
@@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
 	return skb;
 }
 
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+	skb->vlan_present = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+	dst->vlan_present = src->vlan_present;
+	dst->vlan_proto = src->vlan_proto;
+	dst->vlan_tci = src->vlan_tci;
+}
+
 /*
  * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
  * @skb: skbuff to tag
@@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
 	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 					skb_vlan_tag_get(skb));
 	if (likely(skb))
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	return skb;
 }
 
@@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 					  __be16 vlan_proto, u16 vlan_tci)
 {
 	skb->vlan_proto = vlan_proto;
-	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+	skb->vlan_tci = vlan_tci;
+	skb->vlan_present = 1;
 }
 
 /**
@@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 	}
 }
 
-#define HAVE_VLAN_GET_TAG
-
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
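
With the tag-present bit moved into skb->vlan_present, drivers keep using the same accessors; a hedged RX-path sketch where the descriptor fields are illustrative, showing the new clear helper and the now-shifted priority accessor:

	#include <linux/if_vlan.h>

	static void foo_rx_vlan(struct sk_buff *skb, u16 desc_tci, bool desc_has_vlan)
	{
		if (desc_has_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), desc_tci);
		else
			__vlan_hwaccel_clear_tag(skb);

		if (skb_vlan_tag_present(skb))
			/* skb_vlan_tag_get_prio() now returns the PCP value 0-7 */
			pr_debug("vid %u prio %u\n", skb_vlan_tag_get_id(skb),
				 skb_vlan_tag_get_prio(skb));
	}
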
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2da85b0..fb7ae4a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -209,7 +209,7 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
 
 /**
  * linkmode_adv_to_mii_ctrl1000_t
- * advertising: the linkmode advertisement settings
+ * @advertising: the linkmode advertisement settings
  *
  * A small helper function that translates linkmode advertisement
  * settings to phy autonegotiation advertisements for the
@@ -288,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
 }
 
 /**
+ * mii_stat1000_to_linkmode_lpa_t
+ * @advertising: the linkmode advertisement settings to update
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings.
+ */
+static inline void mii_stat1000_to_linkmode_lpa_t(unsigned long *advertising,
+						  u32 lpa)
+{
+	if (lpa & LPA_1000HALF)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				 advertising);
+	if (lpa & LPA_1000FULL)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 advertising);
+}
+
+/**
  * ethtool_adv_to_mii_adv_x
  * @ethadv: the ethtool advertisement settings
  *
@@ -385,19 +404,38 @@ static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
 }
 
 /**
- * ethtool_adv_to_lcl_adv_t
- * @advertising:pointer to ethtool advertising
+ * mii_lpa_to_linkmode_lpa_t
+ * @lpa: value of the MII_LPA register
  *
- * A small helper function that translates ethtool advertising to LVL
+ * A small helper function that translates MII_LPA bits to linkmode
+ * LP advertisement settings.
+ */
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+					     u32 lpa)
+{
+	if (lpa & LPA_LPACK)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 lp_advertising);
+
+	mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
+}
+
+/**
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising: pointer to linkmode advertising
+ *
+ * A small helper function that translates linkmode advertising to LVL
  * pause capabilities.
  */
-static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
 {
 	u32 lcl_adv = 0;
 
-	if (advertising & ADVERTISED_Pause)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+			      advertising))
 		lcl_adv |= ADVERTISE_PAUSE_CAP;
-	if (advertising & ADVERTISED_Asym_Pause)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			      advertising))
 		lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
 	return lcl_adv;
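
Taken together, a PHY driver's status path can fill the linkmode bitmap straight from the MII registers. A hedged sketch, assuming the linkmode helpers from linux/linkmode.h and abbreviating the register handling:

	#include <linux/mii.h>
	#include <linux/phy.h>

	static int foo_read_lpa(struct phy_device *phydev)
	{
		int lpa = phy_read(phydev, MII_LPA);
		int stat1000 = phy_read(phydev, MII_STAT1000);

		if (lpa < 0 || stat1000 < 0)
			return lpa < 0 ? lpa : stat1000;

		linkmode_zero(phydev->lp_advertising);
		mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
		mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, stat1000);
		return 0;
	}
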
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index dca6ab4..36e412c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -226,6 +226,7 @@ enum {
 	MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
 	MLX4_DEV_CAP_FLAG2_USER_MAC_EN		= 1ULL << 38,
 	MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+	MLX4_DEV_CAP_FLAG2_SW_CQ_INIT           = 1ULL << 40,
 };
 
 enum {
@@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
 
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-		  unsigned vector, int collapsed, int timestamp_en);
+		  unsigned int vector, int collapsed, int timestamp_en,
+		  void *buf_addr, bool user_cq);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
 			  int *base, u8 flags, u8 usage);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 857f8ab..4b4207e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -845,6 +845,8 @@ enum tc_setup_type {
 	TC_SETUP_QDISC_PRIO,
 	TC_SETUP_QDISC_MQ,
 	TC_SETUP_QDISC_ETF,
+	TC_SETUP_ROOT_QDISC,
+	TC_SETUP_QDISC_GRED,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
@@ -2388,13 +2390,13 @@ struct pcpu_sw_netstats {
 	u64     tx_packets;
 	u64     tx_bytes;
 	struct u64_stats_sync   syncp;
-};
+} __aligned(4 * sizeof(u64));
 
 struct pcpu_lstats {
 	u64 packets;
 	u64 bytes;
 	struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
 
 #define __netdev_alloc_pcpu_stats(type, gfp)				\
 ({									\
@@ -4068,6 +4070,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
 		       int (*sync)(struct net_device *, const unsigned char *),
 		       int (*unsync)(struct net_device *,
 				     const unsigned char *));
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+			   struct net_device *dev,
+			   int (*sync)(struct net_device *,
+				       const unsigned char *, int),
+			   int (*unsync)(struct net_device *,
+					 const unsigned char *, int));
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+			      struct net_device *dev,
+			      int (*unsync)(struct net_device *,
+					    const unsigned char *, int));
 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
 			  struct net_device *dev,
 			  int (*unsync)(struct net_device *,
@@ -4332,9 +4344,10 @@ static inline bool can_checksum_protocol(netdev_features_t features,
 }
 
 #ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
 #else
-static inline void netdev_rx_csum_fault(struct net_device *dev)
+static inline void netdev_rx_csum_fault(struct net_device *dev,
+					struct sk_buff *skb)
 {
 }
 #endif
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
new file mode 100644
index 0000000..34f38c1
--- /dev/null
+++ b/include/linux/objagg.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _OBJAGG_H
+#define _OBJAGG_H
+
+struct objagg_ops {
+	size_t obj_size;
+	void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+	void (*delta_destroy)(void *priv, void *delta_priv);
+	void * (*root_create)(void *priv, void *obj);
+	void (*root_destroy)(void *priv, void *root_priv);
+};
+
+struct objagg;
+struct objagg_obj;
+
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
+
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
+void objagg_destroy(struct objagg *objagg);
+
+struct objagg_obj_stats {
+	unsigned int user_count;
+	unsigned int delta_user_count; /* includes delta object users */
+};
+
+struct objagg_obj_stats_info {
+	struct objagg_obj_stats stats;
+	struct objagg_obj *objagg_obj; /* associated object */
+	bool is_root;
+};
+
+struct objagg_stats {
+	unsigned int stats_info_count;
+	struct objagg_obj_stats_info stats_info[];
+};
+
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
+void objagg_stats_put(const struct objagg_stats *objagg_stats);
+
+#endif
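
A hedged sketch of the intended objagg flow, using trivial callbacks that decline delta compression so every distinct key becomes its own root (all foo_* names are illustrative):

	#include <linux/err.h>
	#include <linux/objagg.h>
	#include <linux/slab.h>

	struct foo_key { u32 id; };

	static void *foo_delta_create(void *priv, void *parent_obj, void *obj)
	{
		return ERR_PTR(-EOPNOTSUPP);	/* decline aggregation */
	}

	static void foo_delta_destroy(void *priv, void *delta_priv)
	{
	}

	static void *foo_root_create(void *priv, void *obj)
	{
		return kmemdup(obj, sizeof(struct foo_key), GFP_KERNEL);
	}

	static void foo_root_destroy(void *priv, void *root_priv)
	{
		kfree(root_priv);
	}

	static const struct objagg_ops foo_ops = {
		.obj_size	= sizeof(struct foo_key),
		.delta_create	= foo_delta_create,
		.delta_destroy	= foo_delta_destroy,
		.root_create	= foo_root_create,
		.root_destroy	= foo_root_destroy,
	};

	static int foo_demo(void)
	{
		struct foo_key key = { .id = 42 };
		struct objagg_obj *objagg_obj;
		struct objagg *objagg;

		objagg = objagg_create(&foo_ops, NULL);
		if (IS_ERR(objagg))
			return PTR_ERR(objagg);

		objagg_obj = objagg_obj_get(objagg, &key);	/* equal keys share one entry */
		if (!IS_ERR(objagg_obj))
			objagg_obj_put(objagg, objagg_obj);

		objagg_destroy(objagg);
		return 0;
	}
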
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 69f0abe..144de2e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2359,6 +2359,8 @@
 
 #define PCI_VENDOR_ID_SYNOPSYS		0x16c3
 
+#define PCI_VENDOR_ID_USR		0x16ec
+
 #define PCI_VENDOR_ID_VITESSE		0x1725
 #define PCI_DEVICE_ID_VITESSE_VSC7174	0x7174
 
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3ea87f7..8f92724 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -58,6 +58,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
 #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
 #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
 
+extern const int phy_10_100_features_array[4];
+extern const int phy_basic_t1_features_array[2];
+extern const int phy_gbit_features_array[2];
+extern const int phy_10gbit_features_array[1];
+
 /*
  * Set phydev->irq to PHY_POLL if interrupts are not supported,
  * or not desired for this PHY.  Set to PHY_IGNORE_INTERRUPT if
@@ -66,9 +71,8 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
 #define PHY_POLL		-1
 #define PHY_IGNORE_INTERRUPT	-2
 
-#define PHY_HAS_INTERRUPT	0x00000001
-#define PHY_IS_INTERNAL		0x00000002
-#define PHY_RST_AFTER_CLK_EN	0x00000004
+#define PHY_IS_INTERNAL		0x00000001
+#define PHY_RST_AFTER_CLK_EN	0x00000002
 #define MDIO_DEVICE_IS_PHY	0x80000000
 
 /* Interface Mode definitions */
@@ -178,7 +182,6 @@ static inline const char *phy_modes(phy_interface_t interface)
 #define PHY_INIT_TIMEOUT	100000
 #define PHY_STATE_TIME		1
 #define PHY_FORCE_TIMEOUT	10
-#define PHY_AN_TIMEOUT		10
 
 #define PHY_MAX_ADDR	32
 
@@ -264,57 +267,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
 void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 
-#define PHY_INTERRUPT_DISABLED	0x0
-#define PHY_INTERRUPT_ENABLED	0x80000000
+#define PHY_INTERRUPT_DISABLED	false
+#define PHY_INTERRUPT_ENABLED	true
 
 /* PHY state machine states:
  *
  * DOWN: PHY device and driver are not ready for anything.  probe
  * should be called if and only if the PHY is in this state,
  * given that the PHY device exists.
- * - PHY driver probe function will, depending on the PHY, set
- * the state to STARTING or READY
- *
- * STARTING:  PHY device is coming up, and the ethernet driver is
- * not ready.  PHY drivers may set this in the probe function.
- * If they do, they are responsible for making sure the state is
- * eventually set to indicate whether the PHY is UP or READY,
- * depending on the state when the PHY is done starting up.
- * - PHY driver will set the state to READY
- * - start will set the state to PENDING
+ * - PHY driver probe function will set the state to READY
  *
  * READY: PHY is ready to send and receive packets, but the
  * controller is not.  By default, PHYs which do not implement
- * probe will be set to this state by phy_probe().  If the PHY
- * driver knows the PHY is ready, and the PHY state is STARTING,
- * then it sets this STATE.
+ * probe will be set to this state by phy_probe().
  * - start will set the state to UP
  *
- * PENDING: PHY device is coming up, but the ethernet driver is
- * ready.  phy_start will set this state if the PHY state is
- * STARTING.
- * - PHY driver will set the state to UP when the PHY is ready
- *
  * UP: The PHY and attached device are ready to do work.
  * Interrupts should be started here.
- * - timer moves to AN
- *
- * AN: The PHY is currently negotiating the link state.  Link is
- * therefore down for now.  phy_timer will set this state when it
- * detects the state is UP.  config_aneg will set this state
- * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
- * - If autonegotiation finishes, but there's no link, it sets
- *   the state to NOLINK.
- * - If aneg finishes with link, it sets the state to RUNNING,
- *   and calls adjust_link
- * - If autonegotiation did not finish after an arbitrary amount
- *   of time, autonegotiation should be tried again if the PHY
- *   supports "magic" autonegotiation (back to AN)
- * - If it didn't finish, and no magic_aneg, move to FORCING.
+ * - timer moves to NOLINK or RUNNING
  *
  * NOLINK: PHY is up, but not currently plugged in.
- * - If the timer notes that the link comes back, we move to RUNNING
- * - config_aneg moves to AN
+ * - irq or timer will set RUNNING if link comes back
  * - phy_stop moves to HALTED
  *
  * FORCING: PHY is being configured with forced settings
@@ -325,11 +298,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
  *
  * RUNNING: PHY is currently up, running, and possibly sending
  * and/or receiving packets
- * - timer will set CHANGELINK if we're polling (this ensures the
- *   link state is polled every other cycle of this state machine,
- *   which makes it every other second)
- * - irq will set CHANGELINK
- * - config_aneg will set AN
+ * - irq or timer will set NOLINK if link goes down
  * - phy_stop moves to HALTED
  *
  * CHANGELINK: PHY experienced a change in link state
@@ -349,11 +318,8 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
  */
 enum phy_state {
 	PHY_DOWN = 0,
-	PHY_STARTING,
 	PHY_READY,
-	PHY_PENDING,
 	PHY_UP,
-	PHY_AN,
 	PHY_RUNNING,
 	PHY_NOLINK,
 	PHY_FORCING,
@@ -390,7 +356,6 @@ struct phy_c45_device_ids {
  * giving up on the current attempt at acquiring a link
  * irq: IRQ number of the PHY's interrupt (-1 if none)
  * phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the phy_mac_interrupt
  * attached_dev: The attached enet driver's device instance ptr
  * adjust_link: Callback for the enet controller to respond to
  * changes in the link state.
@@ -427,6 +392,9 @@ struct phy_device {
 	/* The most recently read link state */
 	unsigned link:1;
 
+	/* Interrupts are enabled */
+	unsigned interrupts:1;
+
 	enum phy_state state;
 
 	u32 dev_flags;
@@ -442,14 +410,11 @@ struct phy_device {
 	int pause;
 	int asym_pause;
 
-	/* Enabled Interrupts */
-	u32 interrupts;
-
-	/* Union of PHY and Attached devices' supported modes */
-	/* See mii.h for more info */
-	u32 supported;
-	u32 advertising;
-	u32 lp_advertising;
+	/* Union of PHY and Attached devices' supported link modes */
+	/* See ethtool.h for more info */
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
 
 	/* Energy efficient ethernet modes which should be prohibited */
 	u32 eee_broken_modes;
@@ -475,7 +440,6 @@ struct phy_device {
 	void *priv;
 
 	/* Interrupt and Polling infrastructure */
-	struct work_struct phy_queue;
 	struct delayed_work state_queue;
 
 	struct mutex lock;
@@ -674,6 +638,10 @@ struct phy_driver {
 #define PHY_ANY_ID "MATCH ANY PHY"
 #define PHY_ANY_UID 0xffffffff
 
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+
 /* A Structure for boards to register fixups with the PHY Lib */
 struct phy_fixup {
 	struct list_head list;
@@ -697,9 +665,9 @@ struct phy_setting {
 
 const struct phy_setting *
 phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
-		   size_t maxbit, bool exact);
+		   bool exact);
 size_t phy_speeds(unsigned int *speeds, size_t size,
-		  unsigned long *mask, size_t maxbit);
+		  unsigned long *mask);
 
 void phy_resolve_aneg_linkmode(struct phy_device *phydev);
 
@@ -1050,11 +1018,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
 int phy_drivers_register(struct phy_driver *new_driver, int n,
 			 struct module *owner);
 void phy_state_machine(struct work_struct *work);
-void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 void phy_ethtool_ksettings_get(struct phy_device *phydev,
 			       struct ethtool_link_ksettings *cmd);
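
The PHY_ID_MATCH_EXACT/MODEL/VENDOR helpers added above fill .phy_id and .phy_id_mask from a single ID value. A minimal sketch of a driver table using one of them; the ID value, names and callbacks chosen here are placeholders, not a real device:

static struct phy_driver example_phy_drivers[] = {
	{
		/* match all 32 ID bits; 0x00112233 is a placeholder ID */
		PHY_ID_MATCH_EXACT(0x00112233),
		.name		= "Example 1G PHY",
		.features	= PHY_GBIT_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
	},
};
module_phy_driver(example_phy_drivers);
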
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index b37b05b..4587ce3 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -20,7 +20,7 @@ struct phy_device;
 #include <linux/leds.h>
 #include <linux/phy.h>
 
-#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE	10
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE	11
 
 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
 				       FIELD_SIZEOF(struct mdio_device, addr)+\
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 51349d1..7121bbe 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -39,6 +39,15 @@ struct ptp_clock_request {
 };
 
 struct system_device_crosststamp;
+
+/**
+ * struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ */
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
 /**
  * struct ptp_clock_info - describes a PTP hardware clock
  *
@@ -73,8 +82,18 @@ struct system_device_crosststamp;
  *            parameter delta: Desired change in nanoseconds.
  *
  * @gettime64:  Reads the current time from the hardware clock.
+ *              This method is deprecated.  New drivers should implement
+ *              the @gettimex64 method instead.
  *              parameter ts: Holds the result.
  *
+ * @gettimex64:  Reads the current time from the hardware clock and optionally
+ *               also the system clock.
+ *               parameter ts: Holds the PHC timestamp.
+ *               parameter sts: If not NULL, it holds a pair of timestamps from
+ *               the system clock. The first reading is made right before
+ *               reading the lowest bits of the PHC timestamp and the second
+ *               reading immediately follows that.
+ *
  * @getcrosststamp:  Reads the current time from the hardware clock and
  *                   system clock simultaneously.
  *                   parameter cts: Contains timestamp (device,system) pair,
@@ -124,6 +143,8 @@ struct ptp_clock_info {
 	int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
 	int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
 	int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+	int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+			  struct ptp_system_timestamp *sts);
 	int (*getcrosststamp)(struct ptp_clock_info *ptp,
 			      struct system_device_crosststamp *cts);
 	int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
@@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
 
 #endif
 
+static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
+{
+	if (sts)
+		ktime_get_real_ts64(&sts->pre_ts);
+}
+
+static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
+{
+	if (sts)
+		ktime_get_real_ts64(&sts->post_ts);
+}
+
 #endif
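
A rough sketch of how a driver's gettimex64 callback might bracket its hardware read with the two helpers above; example_read_phc_ns() stands in for the device-specific register access and is not a real function:

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before the PHC read */
	ns = example_read_phc_ns(ptp);	/* hypothetical device accessor */
	ptp_read_system_postts(sts);	/* system time just after the PHC read */

	*ts = ns_to_timespec64(ns);
	return 0;
}
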
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0d1b2c3..f17a745 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -777,6 +777,14 @@ struct sk_buff {
 	__u8			encap_hdr_csum:1;
 	__u8			csum_valid:1;
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT	7
+#else
+#define PKT_VLAN_PRESENT_BIT	0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
+	__u8			__pkt_vlan_present_offset[0];
+	__u8			vlan_present:1;
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
 	__u8			csum_not_inet:1;
@@ -784,8 +792,8 @@ struct sk_buff {
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
-	__u8			ipvs_property:1;
 
+	__u8			ipvs_property:1;
 	__u8			inner_protocol_type:1;
 	__u8			remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
@@ -2524,10 +2532,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
 {
-	if (unlikely(skb_is_nonlinear(skb))) {
-		WARN_ON(1);
+	if (WARN_ON(skb_is_nonlinear(skb)))
 		return;
-	}
 	skb->len = len;
 	skb_set_tail_pointer(skb, len);
 }
@@ -3345,7 +3351,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 		    unsigned int flags);
 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
 			 int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 320d49d..2725c83 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,13 @@ struct udp_sock {
 	unsigned int	 corkflag;	/* Cork is required */
 	__u8		 encap_type;	/* Is this an Encapsulation socket? */
 	unsigned char	 no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
-			 no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+			 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+			 encap_enabled:1, /* This socket enabled encap
+					   * processing; UDP tunnels and
+					   * different encapsulation layer set
+					   * this
+					   */
+			 gro_enabled:1;	/* Can accept GRO packets */
 	/*
 	 * Following member retains the information to create a UDP header
 	 * when the socket is uncorked.
@@ -71,6 +77,7 @@ struct udp_sock {
 	 * For encapsulation sockets.
 	 */
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
 	void (*encap_destroy)(struct sock *sk);
 
 	/* GRO functions for UDP socket */
@@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
 	return udp_sk(sk)->no_check6_rx;
 }
 
+static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+				 struct sk_buff *skb)
+{
+	int gso_size;
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+		gso_size = skb_shinfo(skb)->gso_size;
+		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
+	}
+}
+
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
 #define udp_portaddr_for_each_entry(__sk, list) \
 	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
 
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 946bd53..ca23860 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -10,7 +10,7 @@
 struct gnet_stats_basic_cpu {
 	struct gnet_stats_basic_packed bstats;
 	struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
 
 struct net_rate_estimator;
 
diff --git a/include/net/geneve.h b/include/net/geneve.h
index a7600ed..fc6a7e0 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -60,6 +60,12 @@ struct genevehdr {
 	struct geneve_opt options[];
 };
 
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+	return dev->rtnl_link_ops &&
+	       !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+
 #ifdef CONFIG_INET
 struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 					u8 name_assign_type, u16 dst_port);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743..6ac3a5b 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -41,7 +41,7 @@ struct net;
 
 void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
 int icmp_rcv(struct sk_buff *skb);
-void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
 void icmp_out_count(struct net *net, unsigned char type);
 
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 6e91e38..9db98af 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -115,9 +115,8 @@ int inet6_hash(struct sock *sk);
 	 ((__sk)->sk_family == AF_INET6)			&&	\
 	 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))		&&	\
 	 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))	&&	\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif))	||			\
-	   ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
+	 (((__sk)->sk_bound_dev_if == (__dif))	||			\
+	  ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
 	 net_eq(sock_net(__sk), (__net)))
 
 #endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 9141e95..0ce460e 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -79,6 +79,7 @@ struct inet_ehash_bucket {
 
 struct inet_bind_bucket {
 	possible_net_t		ib_net;
+	int			l3mdev;
 	unsigned short		port;
 	signed char		fastreuse;
 	signed char		fastreuseport;
@@ -188,10 +189,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 	hashinfo->ehash_locks = NULL;
 }
 
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+					int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
+				 bound_dev_if, dif, sdif);
+#else
+	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
 struct inet_bind_bucket *
 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
 			struct inet_bind_hashbucket *head,
-			const unsigned short snum);
+			const unsigned short snum, int l3mdev);
 void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 			      struct inet_bind_bucket *tb);
 
@@ -282,9 +294,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
 	(((__sk)->sk_portpair == (__ports))			&&	\
 	 ((__sk)->sk_addrpair == (__cookie))			&&	\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif))			||	\
-	   ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
+	 (((__sk)->sk_bound_dev_if == (__dif))			||	\
+	  ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
 	 net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
@@ -294,9 +305,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
 	(((__sk)->sk_portpair == (__ports))		&&		\
 	 ((__sk)->sk_daddr	== (__saddr))		&&		\
 	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
-	 (!(__sk)->sk_bound_dev_if	||				\
-	   ((__sk)->sk_bound_dev_if == (__dif))		||		\
-	   ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
+	 (((__sk)->sk_bound_dev_if == (__dif))		||		\
+	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
 	 net_eq(sock_net(__sk), (__net)))
 #endif /* 64-bit arch */
 
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a80fd0a..e8eef85 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,6 +130,27 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
 	return sk->sk_bound_dev_if;
 }
 
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	struct net *net = sock_net(sk);
+
+	if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+		return l3mdev_master_ifindex_by_index(net,
+						      sk->sk_bound_dev_if);
+#endif
+
+	return 0;
+}
+
+static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
+				     int dif, int sdif)
+{
+	if (!bound_dev_if)
+		return !sdif || l3mdev_accept;
+	return bound_dev_if == dif || bound_dev_if == sdif;
+}
+
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index 72593e1..8866bfc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -155,6 +155,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 		 struct net_device *orig_dev);
 int ip_local_deliver(struct sk_buff *skb);
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
 int ip_mr_input(struct sk_buff *skb);
 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -421,7 +422,8 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
 }
 
 struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
-					int fc_mx_len);
+					int fc_mx_len,
+					struct netlink_ext_ack *extack);
 static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
 {
 	if (fib_metrics != &dst_default_metrics &&
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 236e40b..69b4bcf 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -69,6 +69,8 @@ struct ip6_tnl_encap_ops {
 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
 			    u8 *protocol, struct flowi6 *fl6);
+	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			   u8 type, u8 code, int offset, __be32 info);
 };
 
 #ifdef CONFIG_INET
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022f..db6b2218 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -311,6 +311,7 @@ struct ip_tunnel_encap_ops {
 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
 			    u8 *protocol, struct flowi4 *fl4);
+	int (*err_handler)(struct sk_buff *skb, u32 info);
 };
 
 #define MAX_IPTUN_ENCAP_OPS 8
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8296505..daf8086 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -975,6 +975,8 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+			      bool have_final);
 
 int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index e47503b..104a666 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -103,6 +103,9 @@ struct netns_ipv4 {
 	/* Shall we try to damage output packets if routing dev changes? */
 	int sysctl_ip_dynaddr;
 	int sysctl_ip_early_demux;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	int sysctl_raw_l3mdev_accept;
+#endif
 	int sysctl_tcp_early_demux;
 	int sysctl_udp_early_demux;
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 72ffb31..ea191d8 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
 			       struct tcf_block_cb *block_cb);
 void tcf_block_cb_unregister(struct tcf_block *block,
 			     tc_setup_cb_t *cb, void *cb_ident);
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident);
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
 
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode);
@@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
 {
 }
 
+static inline
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	return 0;
+}
+
+static inline
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	return 0;
+}
+
+static inline
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
+static inline
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
 static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res, bool compat_mode)
 {
@@ -609,6 +643,7 @@ struct tc_cls_common_offload {
 
 struct tc_cls_u32_knode {
 	struct tcf_exts *exts;
+	struct tcf_result *res;
 	struct tc_u32_sel *sel;
 	u32 handle;
 	u32 val;
@@ -787,12 +822,21 @@ enum tc_mq_command {
 	TC_MQ_CREATE,
 	TC_MQ_DESTROY,
 	TC_MQ_STATS,
+	TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+	unsigned long queue;
+	u32 child_handle;
 };
 
 struct tc_mq_qopt_offload {
 	enum tc_mq_command command;
 	u32 handle;
-	struct tc_qopt_offload_stats stats;
+	union {
+		struct tc_qopt_offload_stats stats;
+		struct tc_mq_opt_offload_graft_params graft_params;
+	};
 };
 
 enum tc_red_command {
@@ -800,13 +844,16 @@ enum tc_red_command {
 	TC_RED_DESTROY,
 	TC_RED_STATS,
 	TC_RED_XSTATS,
+	TC_RED_GRAFT,
 };
 
 struct tc_red_qopt_offload_params {
 	u32 min;
 	u32 max;
 	u32 probability;
+	u32 limit;
 	bool is_ecn;
+	bool is_harddrop;
 	struct gnet_stats_queue *qstats;
 };
 
@@ -818,6 +865,51 @@ struct tc_red_qopt_offload {
 		struct tc_red_qopt_offload_params set;
 		struct tc_qopt_offload_stats stats;
 		struct red_stats *xstats;
+		u32 child_handle;
+	};
+};
+
+enum tc_gred_command {
+	TC_GRED_REPLACE,
+	TC_GRED_DESTROY,
+	TC_GRED_STATS,
+};
+
+struct tc_gred_vq_qopt_offload_params {
+	bool present;
+	u32 limit;
+	u32 prio;
+	u32 min;
+	u32 max;
+	bool is_ecn;
+	bool is_harddrop;
+	u32 probability;
+	/* Only need backlog, see struct tc_prio_qopt_offload_params */
+	u32 *backlog;
+};
+
+struct tc_gred_qopt_offload_params {
+	bool grio_on;
+	bool wred_on;
+	unsigned int dp_cnt;
+	unsigned int dp_def;
+	struct gnet_stats_queue *qstats;
+	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload_stats {
+	struct gnet_stats_basic_packed bstats[MAX_DPs];
+	struct gnet_stats_queue qstats[MAX_DPs];
+	struct red_stats *xstats[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload {
+	enum tc_gred_command command;
+	u32 handle;
+	u32 parent;
+	union {
+		struct tc_gred_qopt_offload_params set;
+		struct tc_gred_qopt_offload_stats stats;
 	};
 };
 
@@ -854,4 +946,14 @@ struct tc_prio_qopt_offload {
 	};
 };
 
+enum tc_root_command {
+	TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+	enum tc_root_command command;
+	u32 handle;
+	bool ingress;
+};
+
 #endif
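
The tc_indr_block_cb_register()/unregister() pair above lets a driver subscribe to block binds on devices it does not implement itself (tunnel netdevs whose filters it may be able to offload, for instance). A hedged sketch of the registration side; the callback body is a placeholder, and the assumption here is that the callback is invoked with TC_SETUP_BLOCK describing the bind:

static int example_indr_setup_tc_cb(struct net_device *dev, void *cb_priv,
				    enum tc_setup_type type, void *type_data)
{
	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	/* type_data describes the block bind/unbind; a real driver would
	 * register or unregister its filter callbacks on that block here.
	 */
	return 0;
}

/* typically called from a NETDEV_REGISTER handler for a device of interest */
static int example_track_foreign_dev(struct net_device *netdev, void *priv)
{
	return tc_indr_block_cb_register(netdev, priv,
					 example_indr_setup_tc_cb, priv);
}
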
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 4fc75f7..92b3eaa 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -42,7 +42,10 @@ struct net_protocol {
 	int			(*early_demux)(struct sk_buff *skb);
 	int			(*early_demux_handler)(struct sk_buff *skb);
 	int			(*handler)(struct sk_buff *skb);
-	void			(*err_handler)(struct sk_buff *skb, u32 info);
+
+	/* This returns an error if we weren't able to handle the error. */
+	int			(*err_handler)(struct sk_buff *skb, u32 info);
+
 	unsigned int		no_policy:1,
 				netns_ok:1,
 				/* does the protocol do more stringent
@@ -58,10 +61,12 @@ struct inet6_protocol {
 	void    (*early_demux_handler)(struct sk_buff *skb);
 	int	(*handler)(struct sk_buff *skb);
 
-	void	(*err_handler)(struct sk_buff *skb,
+	/* This returns an error if we weren't able to handle the error. */
+	int	(*err_handler)(struct sk_buff *skb,
 			       struct inet6_skb_parm *opt,
 			       u8 type, u8 code, int offset,
 			       __be32 info);
+
 	unsigned int	flags;	/* INET6_PROTO_xxx */
 };
 
diff --git a/include/net/raw.h b/include/net/raw.h
index 9c9fa98..821ff48 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -17,7 +17,7 @@
 #ifndef _RAW_H
 #define _RAW_H
 
-
+#include <net/inet_sock.h>
 #include <net/protocol.h>
 #include <linux/icmp.h>
 
@@ -61,6 +61,7 @@ void raw_seq_stop(struct seq_file *seq, void *v);
 
 int raw_hash_sk(struct sock *sk);
 void raw_unhash_sk(struct sock *sk);
+void raw_init(void);
 
 struct raw_sock {
 	/* inet_sock has to be the first member */
@@ -74,4 +75,15 @@ static inline struct raw_sock *raw_sk(const struct sock *sk)
 	return (struct raw_sock *)sk;
 }
 
+static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+				       int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+	return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+				 bound_dev_if, dif, sdif);
+#else
+	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
 #endif	/* _RAW_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index cf26e5aa..e2091bb 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -159,7 +159,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 				    unsigned char name_assign_type,
 				    const struct rtnl_link_ops *ops,
-				    struct nlattr *tb[]);
+				    struct nlattr *tb[],
+				    struct netlink_ext_ack *extack);
 int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4d73642..9481f2c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -24,6 +24,9 @@ struct bpf_flow_keys;
 typedef int tc_setup_cb_t(enum tc_setup_type type,
 			  void *type_data, void *cb_priv);
 
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+				    enum tc_setup_type type, void *type_data);
+
 struct qdisc_rate_table {
 	struct tc_ratespec rate;
 	u32		data[256];
@@ -579,6 +582,30 @@ void qdisc_put(struct Qdisc *qdisc);
 void qdisc_put_unlocked(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 			       unsigned int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+			      void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+				struct Qdisc *new, struct Qdisc *old,
+				enum tc_setup_type type, void *type_data,
+				struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+			  void *type_data)
+{
+	q->flags &= ~TCQ_F_OFFLOADED;
+	return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+			   struct Qdisc *new, struct Qdisc *old,
+			   enum tc_setup_type type, void *type_data,
+			   struct netlink_ext_ack *extack)
+{
+}
+#endif
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack);
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 8dadc74..4588bdc 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -71,7 +71,7 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
 					 SCTP_NUM_AUTH_CHUNK_TYPES)
 
 /* These are the different flavours of event.  */
-enum sctp_event {
+enum sctp_event_type {
 	SCTP_EVENT_T_CHUNK = 1,
 	SCTP_EVENT_T_TIMEOUT,
 	SCTP_EVENT_T_OTHER,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index ab9242e..38da91d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -151,8 +151,8 @@ int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
  * sctp/input.c
  */
 int sctp_rcv(struct sk_buff *skb);
-void sctp_v4_err(struct sk_buff *skb, u32 info);
-void sctp_hash_endpoint(struct sctp_endpoint *);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
 struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
 			     struct sctphdr *, struct sctp_association **,
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9e3d327..24825a8 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -173,7 +173,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
 const struct sctp_sm_table_entry *sctp_sm_lookup_event(
 					struct net *net,
-					enum sctp_event event_type,
+					enum sctp_event_type event_type,
 					enum sctp_state state,
 					union sctp_subtype event_subtype);
 int sctp_chunk_iif(const struct sctp_chunk *);
@@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 /* Prototypes for statetable processing. */
 
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
 	       union sctp_subtype subtype, enum sctp_state state,
 	       struct sctp_endpoint *ep, struct sctp_association *asoc,
 	       void *event_arg, gfp_t gfp);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a11f937..7eaa294 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -96,7 +96,9 @@ struct sctp_stream;
 
 struct sctp_bind_bucket {
 	unsigned short	port;
-	unsigned short	fastreuse;
+	signed char	fastreuse;
+	signed char	fastreuseport;
+	kuid_t		fastuid;
 	struct hlist_node	node;
 	struct hlist_head	owner;
 	struct net	*net;
@@ -215,7 +217,7 @@ struct sctp_sock {
 	 * These two structures must be grouped together for the usercopy
 	 * whitelist region.
 	 */
-	struct sctp_event_subscribe subscribe;
+	__u16 subscribe;
 	struct sctp_initmsg initmsg;
 
 	int user_frag;
@@ -1190,6 +1192,8 @@ int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
 			 struct sctp_sock *, struct sctp_sock *);
 int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
 			 const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+			  struct sctp_sock *sp2, int cnt2);
 union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr	*bp,
 					const union sctp_addr	*addrs,
 					int			addrcnt,
@@ -2073,6 +2077,8 @@ struct sctp_association {
 
 	int sent_cnt_removable;
 
+	__u16 subscribe;
+
 	__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
 	__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
 };
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 51b4e06..bd922a0 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -164,30 +164,39 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
 
 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
 
-/* Is this event type enabled? */
-static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
-					     struct sctp_event_subscribe *mask)
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+					  __u16 sn_type, __u8 on)
 {
-	int offset = sn_type - SCTP_SN_TYPE_BASE;
-	char *amask = (char *) mask;
+	if (sn_type > SCTP_SN_TYPE_MAX)
+		return;
 
-	if (offset >= sizeof(struct sctp_event_subscribe))
-		return 0;
-	return amask[offset];
+	if (on)
+		*subscribe |=  (1 << (sn_type - SCTP_SN_TYPE_BASE));
+	else
+		*subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
+/* Is this event type enabled? */
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
+{
+	if (sn_type > SCTP_SN_TYPE_MAX)
+		return false;
+
+	return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
 }
 
 /* Given an event subscription, is this event enabled? */
-static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
-					   struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+					    __u16 subscribe)
 {
 	__u16 sn_type;
-	int enabled = 1;
 
-	if (sctp_ulpevent_is_notification(event)) {
-		sn_type = sctp_ulpevent_get_notification_type(event);
-		enabled = sctp_ulpevent_type_enabled(sn_type, mask);
-	}
-	return enabled;
+	if (!sctp_ulpevent_is_notification(event))
+		return true;
+
+	sn_type = sctp_ulpevent_get_notification_type(event);
+
+	return sctp_ulpevent_type_enabled(subscribe, sn_type);
 }
 
 #endif /* __sctp_ulpevent_h__ */
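
With the per-socket subscription reduced from struct sctp_event_subscribe to a __u16 bitmask, enabling and testing a notification type comes down to the two helpers above. An illustrative sketch, not lifted from the SCTP code itself:

static void example_subscribe_assoc_change(struct sctp_sock *sp)
{
	/* set the SCTP_ASSOC_CHANGE bit in the subscription mask */
	sctp_ulpevent_type_set(&sp->subscribe, SCTP_ASSOC_CHANGE, 1);

	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_ASSOC_CHANGE))
		pr_debug("association change notifications enabled\n");
}
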
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 881ecb1..866b6d1 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -95,8 +95,8 @@ struct switchdev_obj_port_vlan {
 	u16 vid_end;
 };
 
-#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
-	container_of(obj, struct switchdev_obj_port_vlan, obj)
+#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
+	container_of((OBJ), struct switchdev_obj_port_vlan, obj)
 
 /* SWITCHDEV_OBJ_ID_PORT_MDB */
 struct switchdev_obj_port_mdb {
@@ -105,8 +105,8 @@ struct switchdev_obj_port_mdb {
 	u16 vid;
 };
 
-#define SWITCHDEV_OBJ_PORT_MDB(obj) \
-	container_of(obj, struct switchdev_obj_port_mdb, obj)
+#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
+	container_of((OBJ), struct switchdev_obj_port_mdb, obj)
 
 void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
 				  void *data, void (*destructor)(void const *),
@@ -121,10 +121,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
  * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
  *
  * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
- *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
- *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
  */
 struct switchdev_ops {
 	int	(*switchdev_port_attr_get)(struct net_device *dev,
@@ -132,11 +128,6 @@ struct switchdev_ops {
 	int	(*switchdev_port_attr_set)(struct net_device *dev,
 					   const struct switchdev_attr *attr,
 					   struct switchdev_trans *trans);
-	int	(*switchdev_port_obj_add)(struct net_device *dev,
-					  const struct switchdev_obj *obj,
-					  struct switchdev_trans *trans);
-	int	(*switchdev_port_obj_del)(struct net_device *dev,
-					  const struct switchdev_obj *obj);
 };
 
 enum switchdev_notifier_type {
@@ -146,6 +137,11 @@ enum switchdev_notifier_type {
 	SWITCHDEV_FDB_DEL_TO_DEVICE,
 	SWITCHDEV_FDB_OFFLOADED,
 
+	SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
+	SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+
+	SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
+	SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
 	SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
 	SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
 	SWITCHDEV_VXLAN_FDB_OFFLOADED,
@@ -163,6 +159,13 @@ struct switchdev_notifier_fdb_info {
 	   offloaded:1;
 };
 
+struct switchdev_notifier_port_obj_info {
+	struct switchdev_notifier_info info; /* must be first */
+	const struct switchdev_obj *obj;
+	struct switchdev_trans *trans;
+	bool handled;
+};
+
 static inline struct net_device *
 switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
 {
@@ -180,10 +183,17 @@ int switchdev_port_obj_add(struct net_device *dev,
 			   const struct switchdev_obj *obj);
 int switchdev_port_obj_del(struct net_device *dev,
 			   const struct switchdev_obj *obj);
+
 int register_switchdev_notifier(struct notifier_block *nb);
 int unregister_switchdev_notifier(struct notifier_block *nb);
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
 			     struct switchdev_notifier_info *info);
+
+int register_switchdev_blocking_notifier(struct notifier_block *nb);
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+				      struct switchdev_notifier_info *info);
+
 void switchdev_port_fwd_mark_set(struct net_device *dev,
 				 struct net_device *group_dev,
 				 bool joining);
@@ -191,6 +201,18 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
 bool switchdev_port_same_parent_id(struct net_device *a,
 				   struct net_device *b);
 
+int switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans));
+int switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj));
+
 #define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops))
 #else
 
@@ -239,12 +261,53 @@ static inline int call_switchdev_notifiers(unsigned long val,
 	return NOTIFY_DONE;
 }
 
+static inline int
+register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int
+unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int
+call_switchdev_blocking_notifiers(unsigned long val,
+				  struct net_device *dev,
+				  struct switchdev_notifier_info *info)
+{
+	return NOTIFY_DONE;
+}
+
 static inline bool switchdev_port_same_parent_id(struct net_device *a,
 						 struct net_device *b)
 {
 	return false;
 }
 
+static inline int
+switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans))
+{
+	return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	return 0;
+}
+
 #define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0)
 
 #endif
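
SWITCHDEV_PORT_OBJ_ADD/DEL now arrive over a blocking notifier chain instead of the removed switchdev_port_obj_add/del ops, with switchdev_handle_port_obj_add/del dispatching to the driver when the device is one of its own. A hedged sketch of the driver side; example_netdev_ops and the VLAN programming are placeholders:

static const struct net_device_ops example_netdev_ops;	/* placeholder ops table */

static bool example_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &example_netdev_ops;
}

static int example_port_obj_add(struct net_device *dev,
				const struct switchdev_obj *obj,
				struct switchdev_trans *trans)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* program SWITCHDEV_OBJ_PORT_VLAN(obj) into the hardware */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int example_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    example_port_dev_check,
						    example_port_obj_add);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_switchdev_blocking_nb = {
	.notifier_call = example_switchdev_blocking_event,
};
/* registered via register_switchdev_blocking_notifier() at module init */
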
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a18914d..63e37dd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -313,7 +313,7 @@ extern struct proto tcp_prot;
 
 void tcp_tasklet_init(void);
 
-void tcp_v4_err(struct sk_buff *skb, u32);
+int tcp_v4_err(struct sk_buff *skb, u32);
 
 void tcp_shutdown(struct sock *sk, int how);
 
@@ -1315,33 +1315,16 @@ static inline __sum16 tcp_v4_check(int len, __be32 saddr,
 	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
 }
 
-static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
-{
-	return __skb_checksum_complete(skb);
-}
-
 static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
-		__tcp_checksum_complete(skb);
+		__skb_checksum_complete(skb);
 }
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
 int tcp_filter(struct sock *sk, struct sk_buff *skb);
-
-#undef STATE_TRACE
-
-#ifdef STATE_TRACE
-static const char *statename[]={
-	"Unused","Established","Syn Sent","Syn Recv",
-	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
-	"Close Wait","Last ACK","Listen","Closing"
-};
-#endif
 void tcp_set_state(struct sock *sk, int state);
-
 void tcp_done(struct sock *sk);
-
 int tcp_abort(struct sock *sk, int err);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
diff --git a/include/net/udp.h b/include/net/udp.h
index 9e82cb3..fd6d948 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -252,6 +252,17 @@ static inline int udp_rqueue_get(struct sock *sk)
 	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
 }
 
+static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+				       int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+				 bound_dev_if, dif, sdif);
+#else
+	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
 /* net/ipv4/udp.c */
 void udp_destruct_sock(struct sock *sk);
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
@@ -272,7 +283,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
 		 int (*saddr_cmp)(const struct sock *,
 				  const struct sock *));
-void udp_err(struct sk_buff *, u32);
+int udp_err(struct sk_buff *, u32);
 int udp_abort(struct sock *sk, int err);
 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udp_push_pending_frames(struct sock *sk);
@@ -406,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field)					\
-do {									\
-	if ((sk)->sk_family == AF_INET)					\
-		__UDP_INC_STATS(sock_net(sk), field, 0);		\
-	else								\
-		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
-} while (0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
+				 sock_net(sk)->mib.udp_statistics) :	\
+		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
+				 sock_net(sk)->mib.udp_stats_in6);	\
+})
 #else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
+			 sock_net(sk)->mib.udp_statistics;		\
+})
 #endif
 
+#define __UDPX_INC_STATS(sk, field) \
+	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
 #ifdef CONFIG_PROC_FS
 struct udp_seq_afinfo {
 	sa_family_t			family;
@@ -450,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void);
 #endif
 
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+					      struct sk_buff *skb, bool ipv4)
+{
+	struct sk_buff *segs;
+
+	/* the GSO CB lies after the UDP one, no need to save and restore any
+	 * CB fragment
+	 */
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segs))) {
+		int segs_nr = skb_shinfo(skb)->gso_segs;
+
+		atomic_add(segs_nr, &sk->sk_drops);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	consume_skb(skb);
+	return segs;
+}
+
 #endif	/* _UDP_H */
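
udp_unexpected_gso() and udp_rcv_segment() let the receive path cope with a GSO packet landing on a socket that never enabled UDP_GRO: the packet is software-segmented and the segments delivered one at a time. A rough sketch of that pattern; example_queue_one() is a hypothetical stand-in for the real per-skb delivery:

static int example_queue_one(struct sock *sk, struct sk_buff *skb);	/* hypothetical */

static int example_udp_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *next;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return example_queue_one(sk, skb);

	/* socket did not opt in to GRO: segment and deliver individually */
	segs = udp_rcv_segment(sk, skb, true);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);
		example_queue_one(sk, skb);
	}
	return 0;
}
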
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index fe680ab..dc8d804 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -64,6 +64,8 @@ static inline int udp_sock_create(struct net *net,
 }
 
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+					     struct sk_buff *skb);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
 typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
 						    struct list_head *head,
@@ -76,6 +78,7 @@ struct udp_tunnel_sock_cfg {
 	/* Used for setting up udp_sock fields, see udp.h for details */
 	__u8  encap_type;
 	udp_tunnel_encap_rcv_t encap_rcv;
+	udp_tunnel_encap_err_lookup_t encap_err_lookup;
 	udp_tunnel_encap_destroy_t encap_destroy;
 	udp_tunnel_gro_receive_t gro_receive;
 	udp_tunnel_gro_complete_t gro_complete;
@@ -165,6 +168,12 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
 
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
+	struct udp_sock *up = udp_sk(sock->sk);
+
+	if (up->encap_enabled)
+		return;
+
+	up->encap_enabled = 1;
 #if IS_ENABLED(CONFIG_IPV6)
 	if (sock->sk->sk_family == PF_INET6)
 		ipv6_stub->udpv6_encap_enable();
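
A hedged sketch of how a tunnel driver might wire the new encap_err_lookup callback into its udp_tunnel_sock_cfg; both callbacks here are placeholders rather than code from any real tunnel:

static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder: a real tunnel decapsulates here */
	return 0;
}

static int example_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	/* return 0 if the ICMP error in skb belongs to one of our tunnels,
	 * non-zero so the stack keeps searching otherwise
	 */
	return -ENOENT;
}

static void example_tunnel_setup_sock(struct net *net, struct socket *sock,
				      void *priv)
{
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data	  = priv,
		.encap_type	  = 1,
		.encap_rcv	  = example_encap_rcv,
		.encap_err_lookup = example_encap_err_lookup,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}
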
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 03431c1..b73c670 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -216,6 +216,7 @@ struct vxlan_config {
 	unsigned long		age_interval;
 	unsigned int		addrmax;
 	bool			no_share;
+	enum ifla_vxlan_df	df;
 };
 
 struct vxlan_dev_node {
@@ -420,6 +421,7 @@ struct switchdev_notifier_vxlan_fdb_info {
 	u8 eth_addr[ETH_ALEN];
 	__be32 vni;
 	bool offloaded;
+	bool added_by_user;
 };
 
 #if IS_ENABLED(CONFIG_VXLAN)
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 5687766..5cc7af0 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1205,8 +1205,10 @@ void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);
  * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
  * @portal: portal to set the new value on
  * @ithresh: new threshold value
+ *
+ * Returns 0 on success, or a negative error code.
  */
-void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
 
 /**
  * qman_dqrr_get_iperiod - Get coalesce interrupt period
@@ -1219,7 +1221,9 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);
  * qman_dqrr_set_iperiod - Set coalesce interrupt period
  * @portal: portal to set the new value on
  * @ithresh: new period value
+ *
+ * Returns 0 on success, or a negative error code.
  */
-void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
 
 #endif	/* __FSL_QMAN_H */
diff --git a/include/trace/events/objagg.h b/include/trace/events/objagg.h
new file mode 100644
index 0000000..fcec0fc
--- /dev/null
+++ b/include/trace/events/objagg.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM objagg
+
+#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_OBJAGG_H
+
+#include <linux/tracepoint.h>
+
+struct objagg;
+struct objagg_obj;
+
+TRACE_EVENT(objagg_create,
+	TP_PROTO(const struct objagg *objagg),
+
+	TP_ARGS(objagg),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+	),
+
+	TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_destroy,
+	TP_PROTO(const struct objagg *objagg),
+
+	TP_ARGS(objagg),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+	),
+
+	TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_obj_create,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj),
+
+	TP_ARGS(objagg, obj),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+	),
+
+	TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_destroy,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj),
+
+	TP_ARGS(objagg, obj),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+	),
+
+	TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_get,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj,
+		 unsigned int refcount),
+
+	TP_ARGS(objagg, obj, refcount),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+		__field(unsigned int, refcount)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+		__entry->refcount = refcount;
+	),
+
+	TP_printk("objagg %p, obj %p, refcount %u",
+		  __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_put,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj,
+		 unsigned int refcount),
+
+	TP_ARGS(objagg, obj, refcount),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+		__field(unsigned int, refcount)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+		__entry->refcount = refcount;
+	),
+
+	TP_printk("objagg %p, obj %p, refcount %u",
+		  __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_assign,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj,
+		 const struct objagg_obj *parent,
+		 unsigned int parent_refcount),
+
+	TP_ARGS(objagg, obj, parent, parent_refcount),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+		__field(const void *, parent)
+		__field(unsigned int, parent_refcount)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+		__entry->parent = parent;
+		__entry->parent_refcount = parent_refcount;
+	),
+
+	TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+		  __entry->objagg, __entry->obj,
+		  __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_unassign,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj,
+		 const struct objagg_obj *parent,
+		 unsigned int parent_refcount),
+
+	TP_ARGS(objagg, obj, parent, parent_refcount),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+		__field(const void *, parent)
+		__field(unsigned int, parent_refcount)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+		__entry->parent = parent;
+		__entry->parent_refcount = parent_refcount;
+	),
+
+	TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+		  __entry->objagg, __entry->obj,
+		  __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_root_create,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj),
+
+	TP_ARGS(objagg, obj),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+	),
+
+	TP_printk("objagg %p, obj %p",
+		  __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_root_destroy,
+	TP_PROTO(const struct objagg *objagg,
+		 const struct objagg_obj *obj),
+
+	TP_ARGS(objagg, obj),
+
+	TP_STRUCT__entry(
+		__field(const void *, objagg)
+		__field(const void *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->objagg = objagg;
+		__entry->obj = obj;
+	),
+
+	TP_printk("objagg %p, obj %p",
+		  __entry->objagg, __entry->obj)
+);
+
+#endif /* __TRACE_OBJAGG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index c8f8e24..17be76a 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -882,7 +882,7 @@ struct ethtool_rx_flow_spec {
 	__u32		location;
 };
 
-/* How rings are layed out when accessing virtual functions or
+/* How rings are laid out when accessing virtual functions or
  * offloaded queues is device specific. To allow users to do flow
  * steering and specify these queues the ring cookie is partitioned
  * into a 32bit queue index with an 8 bit virtual function id.
@@ -891,7 +891,7 @@ struct ethtool_rx_flow_spec {
  * devices start supporting PCIe w/ARI. However at the moment I
  * do not know of any devices that support this so I do not reserve
  * space for this at this time. If a future patch consumes the next
- * byte it should be aware of this possiblity.
+ * byte it should be aware of this possibility.
  */
 #define ETHTOOL_RX_FLOW_SPEC_RING	0x00000000FFFFFFFFLL
 #define ETHTOOL_RX_FLOW_SPEC_RING_VF	0x000000FF00000000LL
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 1debfa4..f42c069 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -533,6 +533,7 @@ enum {
 	IFLA_VXLAN_LABEL,
 	IFLA_VXLAN_GPE,
 	IFLA_VXLAN_TTL_INHERIT,
+	IFLA_VXLAN_DF,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
@@ -542,6 +543,14 @@ struct ifla_vxlan_port_range {
 	__be16	high;
 };
 
+enum ifla_vxlan_df {
+	VXLAN_DF_UNSET = 0,
+	VXLAN_DF_SET,
+	VXLAN_DF_INHERIT,
+	__VXLAN_DF_END,
+	VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
 /* GENEVE section */
 enum {
 	IFLA_GENEVE_UNSPEC,
@@ -557,10 +566,19 @@ enum {
 	IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
 	IFLA_GENEVE_LABEL,
 	IFLA_GENEVE_TTL_INHERIT,
+	IFLA_GENEVE_DF,
 	__IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
 
+enum ifla_geneve_df {
+	GENEVE_DF_UNSET = 0,
+	GENEVE_DF_SET,
+	GENEVE_DF_INHERIT,
+	__GENEVE_DF_END,
+	GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
 /* PPP section */
 enum {
 	IFLA_PPP_UNSPEC,
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 0a26a55..a3f87c5 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -26,6 +26,12 @@
  * @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
  *	Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
  *	and NCSI_ATTR_CHANNEL_ID.
+ * @NCSI_CMD_SET_PACKAGE_MASK: set a whitelist of allowed packages.
+ *	Requires NCSI_ATTR_IFINDEX and NCSI_ATTR_PACKAGE_MASK.
+ * @NCSI_CMD_SET_CHANNEL_MASK: set a whitelist of allowed channels.
+ *	Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID, and
+ *	NCSI_ATTR_CHANNEL_MASK. If NCSI_ATTR_CHANNEL_ID is present it sets
+ *	the primary channel.
  * @NCSI_CMD_MAX: highest command number
  */
 enum ncsi_nl_commands {
@@ -34,6 +40,8 @@ enum ncsi_nl_commands {
 	NCSI_CMD_SET_INTERFACE,
 	NCSI_CMD_CLEAR_INTERFACE,
 	NCSI_CMD_SEND_CMD,
+	NCSI_CMD_SET_PACKAGE_MASK,
+	NCSI_CMD_SET_CHANNEL_MASK,
 
 	__NCSI_CMD_AFTER_LAST,
 	NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -48,6 +56,10 @@ enum ncsi_nl_commands {
  * @NCSI_ATTR_PACKAGE_ID: package ID
  * @NCSI_ATTR_CHANNEL_ID: channel ID
  * @NCSI_ATTR_DATA: command payload
+ * @NCSI_ATTR_MULTI_FLAG: flag to signal that multi-mode should be enabled with
+ *	NCSI_CMD_SET_PACKAGE_MASK or NCSI_CMD_SET_CHANNEL_MASK.
+ * @NCSI_ATTR_PACKAGE_MASK: 32-bit mask of allowed packages.
+ * @NCSI_ATTR_CHANNEL_MASK: 32-bit mask of allowed channels.
  * @NCSI_ATTR_MAX: highest attribute number
  */
 enum ncsi_nl_attrs {
@@ -57,6 +69,9 @@ enum ncsi_nl_attrs {
 	NCSI_ATTR_PACKAGE_ID,
 	NCSI_ATTR_CHANNEL_ID,
 	NCSI_ATTR_DATA,
+	NCSI_ATTR_MULTI_FLAG,
+	NCSI_ATTR_PACKAGE_MASK,
+	NCSI_ATTR_CHANNEL_MASK,
 
 	__NCSI_ATTR_AFTER_LAST,
 	NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 401d0c1..95d0db2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -485,6 +485,11 @@ enum {
 
 	TCA_FLOWER_IN_HW_COUNT,
 
+	TCA_FLOWER_KEY_PORT_SRC_MIN,	/* be16 */
+	TCA_FLOWER_KEY_PORT_SRC_MAX,	/* be16 */
+	TCA_FLOWER_KEY_PORT_DST_MIN,	/* be16 */
+	TCA_FLOWER_KEY_PORT_DST_MAX,	/* be16 */
+
 	__TCA_FLOWER_MAX,
 };
 
@@ -518,6 +523,8 @@ enum {
 	TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
 };
 
+#define TCA_FLOWER_MASK_FLAGS_RANGE	(1 << 0) /* Range-based match */
+
 /* Match-all classifier */
 
 enum {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 89ee47c..0d18b1d 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -291,11 +291,38 @@ enum {
        TCA_GRED_DPS,
        TCA_GRED_MAX_P,
        TCA_GRED_LIMIT,
+       TCA_GRED_VQ_LIST,	/* nested TCA_GRED_VQ_ENTRY */
        __TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
 
+enum {
+	TCA_GRED_VQ_ENTRY_UNSPEC,
+	TCA_GRED_VQ_ENTRY,	/* nested TCA_GRED_VQ_* */
+	__TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+	TCA_GRED_VQ_UNSPEC,
+	TCA_GRED_VQ_PAD,
+	TCA_GRED_VQ_DP,			/* u32 */
+	TCA_GRED_VQ_STAT_BYTES,		/* u64 */
+	TCA_GRED_VQ_STAT_PACKETS,	/* u32 */
+	TCA_GRED_VQ_STAT_BACKLOG,	/* u32 */
+	TCA_GRED_VQ_STAT_PROB_DROP,	/* u32 */
+	TCA_GRED_VQ_STAT_PROB_MARK,	/* u32 */
+	TCA_GRED_VQ_STAT_FORCED_DROP,	/* u32 */
+	TCA_GRED_VQ_STAT_FORCED_MARK,	/* u32 */
+	TCA_GRED_VQ_STAT_PDROP,		/* u32 */
+	TCA_GRED_VQ_STAT_OTHER,		/* u32 */
+	TCA_GRED_VQ_FLAGS,		/* u32 */
+	__TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
 struct tc_gred_qopt {
 	__u32		limit;        /* HARD maximal queue length (bytes)    */
 	__u32		qth_min;      /* Min average length threshold (bytes) */
@@ -864,6 +891,8 @@ enum {
 
 	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
 
+	TCA_FQ_CE_THRESHOLD,	/* DCTCP-like CE-marking threshold */
+
 	__TCA_FQ_MAX
 };
 
@@ -882,6 +911,7 @@ struct tc_fq_qd_stats {
 	__u32	inactive_flows;
 	__u32	throttled_flows;
 	__u32	unthrottle_latency_ns;
+	__u64	ce_mark;		/* packets above ce_threshold */
 };
 
 /* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 3039bf6..d73d839 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -84,6 +84,16 @@ struct ptp_sys_offset {
 	struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
 };
 
+struct ptp_sys_offset_extended {
+	unsigned int n_samples; /* Desired number of measurements. */
+	unsigned int rsv[3];    /* Reserved for future use. */
+	/*
+	 * Array of [system, phc, system] time stamps. The kernel will provide
+	 * 3*n_samples time stamps.
+	 */
+	struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
+};
+
 struct ptp_sys_offset_precise {
 	struct ptp_clock_time device;
 	struct ptp_clock_time sys_realtime;
@@ -136,6 +146,8 @@ struct ptp_pin_desc {
 #define PTP_PIN_SETFUNC    _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
 #define PTP_SYS_OFFSET_PRECISE \
 	_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED \
+	_IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
 
 struct ptp_extts_event {
 	struct ptp_clock_time t; /* Time event occured. */
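
For orientation, a minimal user-space sketch of driving the new ioctl follows (a hedged illustration: it assumes a PHC exposed as /dev/ptp0 and userspace headers that already carry the definitions added above; error handling is minimal).

/* Hedged sketch: request five [system, PHC, system] triplets via the new
 * PTP_SYS_OFFSET_EXTENDED ioctl and print them. Assumes /dev/ptp0 exists.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_extended req = { .n_samples = 5 };
	unsigned int i;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) < 0)
		return 1;

	for (i = 0; i < req.n_samples; i++) {
		/* ts[i][0] and ts[i][2] bracket the PHC read in ts[i][1],
		 * so the PHC sample falls somewhere inside that window.
		 */
		printf("sys %lld.%09u phc %lld.%09u sys %lld.%09u\n",
		       (long long)req.ts[i][0].sec, req.ts[i][0].nsec,
		       (long long)req.ts[i][1].sec, req.ts[i][1].nsec,
		       (long long)req.ts[i][2].sec, req.ts[i][2].nsec);
	}
	return 0;
}
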
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index c81feb3..d584073 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -129,6 +129,7 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_STREAM_SCHEDULER_VALUE	124
 #define SCTP_INTERLEAVING_SUPPORTED	125
 #define SCTP_SENDMSG_CONNECT	126
+#define SCTP_EVENT	127
 
 /* PR-SCTP policies */
 #define SCTP_PR_SCTP_NONE	0x0000
@@ -632,7 +633,9 @@ union sctp_notification {
  */
 
 enum sctp_sn_type {
-	SCTP_SN_TYPE_BASE     = (1<<15),
+	SCTP_SN_TYPE_BASE	= (1<<15),
+	SCTP_DATA_IO_EVENT	= SCTP_SN_TYPE_BASE,
+#define SCTP_DATA_IO_EVENT		SCTP_DATA_IO_EVENT
 	SCTP_ASSOC_CHANGE,
 #define SCTP_ASSOC_CHANGE		SCTP_ASSOC_CHANGE
 	SCTP_PEER_ADDR_CHANGE,
@@ -657,6 +660,8 @@ enum sctp_sn_type {
 #define SCTP_ASSOC_RESET_EVENT		SCTP_ASSOC_RESET_EVENT
 	SCTP_STREAM_CHANGE_EVENT,
 #define SCTP_STREAM_CHANGE_EVENT	SCTP_STREAM_CHANGE_EVENT
+	SCTP_SN_TYPE_MAX	= SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_SN_TYPE_MAX		SCTP_SN_TYPE_MAX
 };
 
 /* Notification error codes used to fill up the error fields in some
@@ -1150,6 +1155,12 @@ struct sctp_add_streams {
 	uint16_t sas_outstrms;
 };
 
+struct sctp_event {
+	sctp_assoc_t se_assoc_id;
+	uint16_t se_type;
+	uint8_t se_on;
+};
+
 /* SCTP Stream schedulers */
 enum sctp_sched_type {
 	SCTP_SS_FCFS,
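
A hedged sketch of how the per-event subscription added above (SCTP_EVENT and struct sctp_event) could be used from user space; header availability and the meaning of a zero association id are assumptions, and the helper name is made up.

/* Hedged sketch: subscribe to association-change notifications only,
 * using the new SCTP_EVENT socket option. Assumes userspace headers
 * that already export struct sctp_event and SCTP_EVENT.
 */
#include <string.h>
#include <netinet/in.h>		/* IPPROTO_SCTP */
#include <sys/socket.h>
#include <linux/sctp.h>

static int enable_assoc_change(int sd, sctp_assoc_t assoc_id)
{
	struct sctp_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.se_assoc_id = assoc_id;	/* association to update (0: socket default, assumption) */
	ev.se_type = SCTP_ASSOC_CHANGE;
	ev.se_on = 1;			/* 1 = subscribe, 0 = unsubscribe */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
}
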
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index e02d319..8bb6cc5 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -266,6 +266,7 @@ enum {
 	TCP_NLA_BYTES_RETRANS,	/* Data bytes retransmitted */
 	TCP_NLA_DSACK_DUPS,	/* DSACK blocks received */
 	TCP_NLA_REORD_SEEN,	/* reordering events seen */
+	TCP_NLA_SRTT,		/* smoothed RTT in usecs */
 };
 
 /* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09502de..30baccb 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -33,6 +33,7 @@ struct udphdr {
 #define UDP_NO_CHECK6_TX 101	/* Disable sending checksum for UDP6X */
 #define UDP_NO_CHECK6_RX 102	/* Disable accpeting checksum for UDP6 */
 #define UDP_SEGMENT	103	/* Set GSO segmentation size */
+#define UDP_GRO		104	/* This socket can receive UDP GRO packets */
 
 /* UDP encapsulation types */
 #define UDP_ENCAP_ESPINUDP_NON_IKE	1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
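
A short, hedged sketch of a receiver opting into the new UDP_GRO socket option; the fallback define mirrors the value added above, and splitting the aggregated payload back into individual datagrams is left to the application and not shown.

/* Hedged sketch: let a UDP receiver accept GRO-aggregated payloads. */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* matches the definition added above */
#endif

static int enable_udp_gro(int sd)
{
	int on = 1;

	/* After this, recvmsg() may return one buffer covering several
	 * wire datagrams of the same flow; the application has to split
	 * it back into records itself.
	 */
	return setsockopt(sd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
}
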
diff --git a/lib/Kconfig b/lib/Kconfig
index a9965f4..7dbbcfe 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,3 +624,6 @@
 
 config GENERIC_LIB_UCMPDI2
 	bool
+
+config OBJAGG
+	tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8..b3c91b9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1976,6 +1976,16 @@
 
 	  If unsure, say N.
 
+config TEST_OBJAGG
+	tristate "Perform selftest on object aggreration manager"
+	default n
+	depends on OBJAGG
+	help
+	  Enable this option to test the object aggregation manager on boot
+	  (or module load).
+
+	  If unsure, say N.
+
 endif # RUNTIME_TESTING_MENU
 
 config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index db06d12..f5262d3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -75,6 +75,7 @@
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
 obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -274,3 +275,4 @@
 obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
 obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
 obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 0000000..c9b457a
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg {
+	const struct objagg_ops *ops;
+	void *priv;
+	struct rhashtable obj_ht;
+	struct rhashtable_params ht_params;
+	struct list_head obj_list;
+	unsigned int obj_count;
+};
+
+struct objagg_obj {
+	struct rhash_head ht_node; /* member of objagg->obj_ht */
+	struct list_head list; /* member of objagg->obj_list */
+	struct objagg_obj *parent; /* if the object is nested, this
+				    * holds pointer to parent, otherwise NULL
+				    */
+	union {
+		void *delta_priv; /* user delta private */
+		void *root_priv; /* user root private */
+	};
+	unsigned int refcount; /* counts number of users of this object
+				* including nested objects
+				*/
+	struct objagg_obj_stats stats;
+	unsigned long obj[0];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+	return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+	return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+	objagg_obj->stats.user_count++;
+	objagg_obj->stats.delta_user_count++;
+	if (objagg_obj->parent)
+		objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+	objagg_obj->stats.user_count--;
+	objagg_obj->stats.delta_user_count--;
+	if (objagg_obj->parent)
+		objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+	/* Nesting is not supported, so we can use ->parent
+	 * to figure out if the object is root.
+	 */
+	return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj:	objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Either the object is root itself when the private is returned
+ * directly, or the parent is root and its private is returned
+ * instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+	if (objagg_obj_is_root(objagg_obj))
+		return objagg_obj->root_priv;
+	WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+	return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj:	objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private delta pointer or NULL in case the passed
+ * object is root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+	if (objagg_obj_is_root(objagg_obj))
+		return NULL;
+	return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj:	objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns the user private pointer passed to objagg_obj_get() as the "obj" arg.
+ */
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
+{
+	return objagg_obj->obj;
+}
+EXPORT_SYMBOL(objagg_obj_raw);
+
+static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
+{
+	return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
+}
+
+static int objagg_obj_parent_assign(struct objagg *objagg,
+				    struct objagg_obj *objagg_obj,
+				    struct objagg_obj *parent)
+{
+	void *delta_priv;
+
+	delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+					       objagg_obj->obj);
+	if (IS_ERR(delta_priv))
+		return PTR_ERR(delta_priv);
+
+	/* User returned a delta private, that means that
+	 * our object can be aggregated into the parent.
+	 */
+	objagg_obj->parent = parent;
+	objagg_obj->delta_priv = delta_priv;
+	objagg_obj_ref_inc(objagg_obj->parent);
+	trace_objagg_obj_parent_assign(objagg, objagg_obj,
+				       parent,
+				       parent->refcount);
+	return 0;
+}
+
+static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
+					   struct objagg_obj *objagg_obj)
+{
+	struct objagg_obj *objagg_obj_cur;
+	int err;
+
+	list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
+		/* Nesting is not supported. In case the object
+		 * is not root, it cannot be assigned as parent.
+		 */
+		if (!objagg_obj_is_root(objagg_obj_cur))
+			continue;
+		err = objagg_obj_parent_assign(objagg, objagg_obj,
+					       objagg_obj_cur);
+		if (!err)
+			return 0;
+	}
+	return -ENOENT;
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+			     struct objagg_obj *objagg_obj);
+
+static void objagg_obj_parent_unassign(struct objagg *objagg,
+				       struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_parent_unassign(objagg, objagg_obj,
+					 objagg_obj->parent,
+					 objagg_obj->parent->refcount);
+	objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
+	__objagg_obj_put(objagg, objagg_obj->parent);
+}
+
+static int objagg_obj_root_create(struct objagg *objagg,
+				  struct objagg_obj *objagg_obj)
+{
+	objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+							 objagg_obj->obj);
+	if (IS_ERR(objagg_obj->root_priv))
+		return PTR_ERR(objagg_obj->root_priv);
+
+	trace_objagg_obj_root_create(objagg, objagg_obj);
+	return 0;
+}
+
+static void objagg_obj_root_destroy(struct objagg *objagg,
+				    struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_root_destroy(objagg, objagg_obj);
+	objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+}
+
+static int objagg_obj_init(struct objagg *objagg,
+			   struct objagg_obj *objagg_obj)
+{
+	int err;
+
+	/* Try to find if the object can be aggregated under an existing one. */
+	err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
+	if (!err)
+		return 0;
+	/* If aggregation is not possible, make the object a root. */
+	return objagg_obj_root_create(objagg, objagg_obj);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+			    struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_is_root(objagg_obj))
+		objagg_obj_parent_unassign(objagg, objagg_obj);
+	else
+		objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+	int err;
+
+	objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+			     GFP_KERNEL);
+	if (!objagg_obj)
+		return ERR_PTR(-ENOMEM);
+	objagg_obj_ref_inc(objagg_obj);
+	memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+	err = objagg_obj_init(objagg, objagg_obj);
+	if (err)
+		goto err_obj_init;
+
+	err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+				     objagg->ht_params);
+	if (err)
+		goto err_ht_insert;
+	list_add(&objagg_obj->list, &objagg->obj_list);
+	objagg->obj_count++;
+	trace_objagg_obj_create(objagg, objagg_obj);
+
+	return objagg_obj;
+
+err_ht_insert:
+	objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+	kfree(objagg_obj);
+	return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	/* First, try to find the object exactly as user passed it,
+	 * perhaps it is already in use.
+	 */
+	objagg_obj = objagg_obj_lookup(objagg, obj);
+	if (objagg_obj) {
+		objagg_obj_ref_inc(objagg_obj);
+		return objagg_obj;
+	}
+
+	return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg:	objagg instance
+ * @obj:	user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main cases this function handles:
+ * 1) The object according to "obj" already exists. In that case
+ *    the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ *    another object. In that case, user ops->delta_create() is called
+ *    to obtain delta data and a new object is created with returned
+ *    user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ *    any of the existing objects. In that case, user ops->root_create()
+ *    is called to create the root and a new object is created with
+ *    returned user-root private pointer.
+ *
+ * Returns a pointer to the objagg object instance in case of success,
+ * otherwise it returns an error pointer created with the ERR_PTR macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	objagg_obj = __objagg_obj_get(objagg, obj);
+	if (IS_ERR(objagg_obj))
+		return objagg_obj;
+	objagg_obj_stats_inc(objagg_obj);
+	trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+	return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+			       struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_destroy(objagg, objagg_obj);
+	--objagg->obj_count;
+	list_del(&objagg_obj->list);
+	rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+			       objagg->ht_params);
+	objagg_obj_fini(objagg, objagg_obj);
+	kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+			     struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_ref_dec(objagg_obj))
+		objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg:	objagg instance
+ * @objagg_obj:	objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+	objagg_obj_stats_dec(objagg_obj);
+	__objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops:	user-specific callbacks
+ * @priv:	pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the type
+ * of the object. The user fills in ops which take care of the specific
+ * user object manipulation.
+ *
+ * As a very simple example, consider integer numbers. Take, for example,
+ * number 8 as a root object. It can aggregate number 9 with delta 1,
+ * number 10 with delta 2, etc. This example is implemented as
+ * a part of the testing module in the test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to the newly created objagg instance in case of success,
+ * otherwise it returns an error pointer created with the ERR_PTR macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+{
+	struct objagg *objagg;
+	int err;
+
+	if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+		    !ops->delta_create || !ops->delta_destroy))
+		return ERR_PTR(-EINVAL);
+	objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+	if (!objagg)
+		return ERR_PTR(-ENOMEM);
+	objagg->ops = ops;
+	objagg->priv = priv;
+	INIT_LIST_HEAD(&objagg->obj_list);
+
+	objagg->ht_params.key_len = ops->obj_size;
+	objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+	objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+	err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+	if (err)
+		goto err_rhashtable_init;
+
+	trace_objagg_create(objagg);
+	return objagg;
+
+err_rhashtable_init:
+	kfree(objagg);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg:	objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+	trace_objagg_destroy(objagg);
+	WARN_ON(!list_empty(&objagg->obj_list));
+	rhashtable_destroy(&objagg->obj_ht);
+	kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+	const struct objagg_obj_stats_info *stats_info1 = a;
+	const struct objagg_obj_stats_info *stats_info2 = b;
+
+	if (stats_info1->is_root != stats_info2->is_root)
+		return stats_info2->is_root - stats_info1->is_root;
+	if (stats_info1->stats.delta_user_count !=
+	    stats_info2->stats.delta_user_count)
+		return stats_info2->stats.delta_user_count -
+		       stats_info1->stats.delta_user_count;
+	return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg:	objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ *    indexes.
+ * 3) In case more objects have the same delta user count,
+ *    the objects are ordered by user count.
+ *
+ * Returns a pointer to the stats instance in case of success,
+ * otherwise it returns an error pointer created with the ERR_PTR macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+	struct objagg_stats *objagg_stats;
+	struct objagg_obj *objagg_obj;
+	size_t alloc_size;
+	int i;
+
+	alloc_size = sizeof(*objagg_stats) +
+		     sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+	objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+	if (!objagg_stats)
+		return ERR_PTR(-ENOMEM);
+
+	i = 0;
+	list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+		memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+		       sizeof(objagg_stats->stats_info[0].stats));
+		objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+		objagg_stats->stats_info[i].is_root =
+					objagg_obj_is_root(objagg_obj);
+		i++;
+	}
+	objagg_stats->stats_info_count = i;
+
+	sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+	     sizeof(struct objagg_obj_stats_info),
+	     objagg_stats_info_sort_cmp_func, NULL);
+
+	return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats:	objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+	kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa22bca..f3e5707 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
 #define SKB_HASH	0x1234aaab
 #define SKB_QUEUE_MAP	123
 #define SKB_VLAN_TCI	0xffff
+#define SKB_VLAN_PRESENT	1
 #define SKB_DEV_IFINDEX	577
 #define SKB_DEV_TYPE	588
 
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
 		CLASSIC,
 		{ },
 		{
-			{ 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
-			{ 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+			{ 1, SKB_VLAN_TCI },
+			{ 10, SKB_VLAN_TCI }
 		},
 	},
 	{
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
 		CLASSIC,
 		{ },
 		{
-			{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
-			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+			{ 1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
 		},
 	},
 	{
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
 #endif
 		{ },
 		{
-			{  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
-			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+			{  1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
 		},
 		.fill_helper = bpf_fill_maxinsns6,
 		.expected_errcode = -ENOTSUPP,
@@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	skb->hash = SKB_HASH;
 	skb->queue_mapping = SKB_QUEUE_MAP;
 	skb->vlan_tci = SKB_VLAN_TCI;
+	skb->vlan_present = SKB_VLAN_PRESENT;
 	skb->vlan_proto = htons(ETH_P_IP);
 	dev_net_set(&dev, &init_net);
 	skb->dev = &dev;
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 0000000..ab57144
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+	unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+	if (key_id >= NUM_KEYS) {
+		WARN_ON(1);
+		return 0;
+	}
+	return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+	unsigned int root_count;
+	unsigned int delta_count;
+	char next_root_buf[BUF_LEN];
+	struct objagg_obj *objagg_objs[NUM_KEYS];
+	unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+	struct tokey key;
+	char buf[BUF_LEN];
+};
+
+struct delta {
+	unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+					struct objagg *objagg,
+					unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+	struct tokey key;
+	int err;
+
+	key.id = key_id;
+	objagg_obj = objagg_obj_get(objagg, &key);
+	if (IS_ERR(objagg_obj)) {
+		pr_err("Key %u: Failed to get object.\n", key_id);
+		return objagg_obj;
+	}
+	if (!world->key_refs[key_id_index(key_id)]) {
+		world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+	} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+		pr_err("Key %u: God another object for the same key.\n",
+		       key_id);
+		err = -EINVAL;
+		goto err_key_id_check;
+	}
+	world->key_refs[key_id_index(key_id)]++;
+	return objagg_obj;
+
+err_key_id_check:
+	objagg_obj_put(objagg, objagg_obj);
+	return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+			  unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+
+	if (!world->key_refs[key_id_index(key_id)])
+		return;
+	objagg_obj = world->objagg_objs[key_id_index(key_id)];
+	objagg_obj_put(objagg, objagg_obj);
+	world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+	struct tokey *parent_key = parent_obj;
+	struct world *world = priv;
+	struct tokey *key = obj;
+	int diff = key->id - parent_key->id;
+	struct delta *delta;
+
+	if (diff < 0 || diff > MAX_KEY_ID_DIFF)
+		return ERR_PTR(-EINVAL);
+
+	delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+	if (!delta)
+		return ERR_PTR(-ENOMEM);
+	delta->key_id_diff = diff;
+	world->delta_count++;
+	return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+	struct delta *delta = delta_priv;
+	struct world *world = priv;
+
+	world->delta_count--;
+	kfree(delta);
+}
+
+static void *root_create(void *priv, void *obj)
+{
+	struct world *world = priv;
+	struct tokey *key = obj;
+	struct root *root;
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+	memcpy(&root->key, key, sizeof(root->key));
+	memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
+	world->root_count++;
+	return root;
+}
+
+static void root_destroy(void *priv, void *root_priv)
+{
+	struct root *root = root_priv;
+	struct world *world = priv;
+
+	world->root_count--;
+	kfree(root);
+}
+
+static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
+				unsigned int key_id, bool should_create_root)
+{
+	unsigned int orig_root_count = world->root_count;
+	struct objagg_obj *objagg_obj;
+	const struct root *root;
+	int err;
+
+	if (should_create_root)
+		prandom_bytes(world->next_root_buf,
+			      sizeof(world->next_root_buf));
+
+	objagg_obj = world_obj_get(world, objagg, key_id);
+	if (IS_ERR(objagg_obj)) {
+		pr_err("Key %u: Failed to get object.\n", key_id);
+		return PTR_ERR(objagg_obj);
+	}
+	if (should_create_root) {
+		if (world->root_count != orig_root_count + 1) {
+			pr_err("Key %u: Root was not created\n", key_id);
+			err = -EINVAL;
+			goto err_check_root_count;
+		}
+	} else {
+		if (world->root_count != orig_root_count) {
+			pr_err("Key %u: Root was incorrectly created\n",
+			       key_id);
+			err = -EINVAL;
+			goto err_check_root_count;
+		}
+	}
+	root = objagg_obj_root_priv(objagg_obj);
+	if (root->key.id != key_id) {
+		pr_err("Key %u: Root has unexpected key id\n", key_id);
+		err = -EINVAL;
+		goto err_check_key_id;
+	}
+	if (should_create_root &&
+	    memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
+		pr_err("Key %u: Buffer does not match the expected content\n",
+		       key_id);
+		err = -EINVAL;
+		goto err_check_buf;
+	}
+	return 0;
+
+err_check_buf:
+err_check_key_id:
+err_check_root_count:
+	objagg_obj_put(objagg, objagg_obj);
+	return err;
+}
+
+static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
+				unsigned int key_id, bool should_destroy_root)
+{
+	unsigned int orig_root_count = world->root_count;
+
+	world_obj_put(world, objagg, key_id);
+
+	if (should_destroy_root) {
+		if (world->root_count != orig_root_count - 1) {
+			pr_err("Key %u: Root was not destroyed\n", key_id);
+			return -EINVAL;
+		}
+	} else {
+		if (world->root_count != orig_root_count) {
+			pr_err("Key %u: Root was incorrectly destroyed\n",
+			       key_id);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int check_stats_zero(struct objagg *objagg)
+{
+	const struct objagg_stats *stats;
+	int err = 0;
+
+	stats = objagg_stats_get(objagg);
+	if (IS_ERR(stats))
+		return PTR_ERR(stats);
+
+	if (stats->stats_info_count != 0) {
+		pr_err("Stats: Object count is not zero while it should be\n");
+		err = -EINVAL;
+	}
+
+	objagg_stats_put(stats);
+	return err;
+}
+
+static int check_stats_nodelta(struct objagg *objagg)
+{
+	const struct objagg_stats *stats;
+	int i;
+	int err;
+
+	stats = objagg_stats_get(objagg);
+	if (IS_ERR(stats))
+		return PTR_ERR(stats);
+
+	if (stats->stats_info_count != NUM_KEYS) {
+		pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
+		       NUM_KEYS, stats->stats_info_count);
+		err = -EINVAL;
+		goto stats_put;
+	}
+
+	for (i = 0; i < stats->stats_info_count; i++) {
+		if (stats->stats_info[i].stats.user_count != 2) {
+			pr_err("Stats: incorrect user count\n");
+			err = -EINVAL;
+			goto stats_put;
+		}
+		if (stats->stats_info[i].stats.delta_user_count != 2) {
+			pr_err("Stats: incorrect delta user count\n");
+			err = -EINVAL;
+			goto stats_put;
+		}
+	}
+	err = 0;
+
+stats_put:
+	objagg_stats_put(stats);
+	return err;
+}
+
+static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void delta_destroy_dummy(void *priv, void *delta_priv)
+{
+}
+
+static const struct objagg_ops nodelta_ops = {
+	.obj_size = sizeof(struct tokey),
+	.delta_create = delta_create_dummy,
+	.delta_destroy = delta_destroy_dummy,
+	.root_create = root_create,
+	.root_destroy = root_destroy,
+};
+
+static int test_nodelta(void)
+{
+	struct world world = {};
+	struct objagg *objagg;
+	int i;
+	int err;
+
+	objagg = objagg_create(&nodelta_ops, &world);
+	if (IS_ERR(objagg))
+		return PTR_ERR(objagg);
+
+	err = check_stats_zero(objagg);
+	if (err)
+		goto err_stats_first_zero;
+
+	/* First round of gets, the root objects should be created */
+	for (i = 0; i < NUM_KEYS; i++) {
+		err = test_nodelta_obj_get(&world, objagg, i, true);
+		if (err)
+			goto err_obj_first_get;
+	}
+
+	/* Do the second round of gets, all roots are already created,
+	 * make sure that no new root is created
+	 */
+	for (i = 0; i < NUM_KEYS; i++) {
+		err = test_nodelta_obj_get(&world, objagg, i, false);
+		if (err)
+			goto err_obj_second_get;
+	}
+
+	err = check_stats_nodelta(objagg);
+	if (err)
+		goto err_stats_nodelta;
+
+	for (i = NUM_KEYS - 1; i >= 0; i--) {
+		err = test_nodelta_obj_put(&world, objagg, i, false);
+		if (err)
+			goto err_obj_first_put;
+	}
+	for (i = NUM_KEYS - 1; i >= 0; i--) {
+		err = test_nodelta_obj_put(&world, objagg, i, true);
+		if (err)
+			goto err_obj_second_put;
+	}
+
+	err = check_stats_zero(objagg);
+	if (err)
+		goto err_stats_second_zero;
+
+	objagg_destroy(objagg);
+	return 0;
+
+err_stats_nodelta:
+err_obj_first_put:
+err_obj_second_get:
+	for (i--; i >= 0; i--)
+		world_obj_put(&world, objagg, i);
+
+	i = NUM_KEYS;
+err_obj_first_get:
+err_obj_second_put:
+	for (i--; i >= 0; i--)
+		world_obj_put(&world, objagg, i);
+err_stats_first_zero:
+err_stats_second_zero:
+	objagg_destroy(objagg);
+	return err;
+}
+
+static const struct objagg_ops delta_ops = {
+	.obj_size = sizeof(struct tokey),
+	.delta_create = delta_create,
+	.delta_destroy = delta_destroy,
+	.root_create = root_create,
+	.root_destroy = root_destroy,
+};
+
+enum action {
+	ACTION_GET,
+	ACTION_PUT,
+};
+
+enum expect_delta {
+	EXPECT_DELTA_SAME,
+	EXPECT_DELTA_INC,
+	EXPECT_DELTA_DEC,
+};
+
+enum expect_root {
+	EXPECT_ROOT_SAME,
+	EXPECT_ROOT_INC,
+	EXPECT_ROOT_DEC,
+};
+
+struct expect_stats_info {
+	struct objagg_obj_stats stats;
+	bool is_root;
+	unsigned int key_id;
+};
+
+struct expect_stats {
+	unsigned int info_count;
+	struct expect_stats_info info[NUM_KEYS];
+};
+
+struct action_item {
+	unsigned int key_id;
+	enum action action;
+	enum expect_delta expect_delta;
+	enum expect_root expect_root;
+	struct expect_stats expect_stats;
+};
+
+#define EXPECT_STATS(count, ...)		\
+{						\
+	.info_count = count,			\
+	.info = { __VA_ARGS__ }			\
+}
+
+#define ROOT(key_id, user_count, delta_user_count)	\
+	{{user_count, delta_user_count}, true, key_id}
+
+#define DELTA(key_id, user_count)			\
+	{{user_count, user_count}, false, key_id}
+
+static const struct action_item action_items[] = {
+	{
+		1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+		EXPECT_STATS(1, ROOT(1, 1, 1)),
+	},	/* r: 1			d: */
+	{
+		7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+		EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
+	},	/* r: 1, 7		d: */
+	{
+		3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
+				DELTA(3, 1)),
+	},	/* r: 1, 7		d: 3^1 */
+	{
+		5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
+				DELTA(3, 1), DELTA(5, 1)),
+	},	/* r: 1, 7		d: 3^1, 5^1 */
+	{
+		3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
+				DELTA(3, 2), DELTA(5, 1)),
+	},	/* r: 1, 7		d: 3^1, 3^1, 5^1 */
+	{
+		1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
+				DELTA(3, 2), DELTA(5, 1)),
+	},	/* r: 1, 1, 7		d: 3^1, 3^1, 5^1 */
+	{
+		30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+		EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
+				DELTA(3, 2), DELTA(5, 1)),
+	},	/* r: 1, 1, 7, 30	d: 3^1, 3^1, 5^1 */
+	{
+		8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
+				DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
+	},	/* r: 1, 1, 7, 30	d: 3^1, 3^1, 5^1, 8^7 */
+	{
+		8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
+				DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
+	},	/* r: 1, 1, 7, 30	d: 3^1, 3^1, 5^1, 8^7, 8^7 */
+	{
+		3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
+				DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
+	},	/* r: 1, 1, 7, 30	d: 3^1, 5^1, 8^7, 8^7 */
+	{
+		3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
+				DELTA(8, 2), DELTA(5, 1)),
+	},	/* r: 1, 1, 7, 30	d: 5^1, 8^7, 8^7 */
+	{
+		1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
+				DELTA(8, 2), DELTA(5, 1)),
+	},	/* r: 1, 7, 30		d: 5^1, 8^7, 8^7 */
+	{
+		1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
+				DELTA(8, 2), DELTA(5, 1)),
+	},	/* r: 7, 30		d: 5^1, 8^7, 8^7 */
+	{
+		5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+		EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
+				DELTA(8, 2)),
+	},	/* r: 7, 30		d: 8^7, 8^7 */
+	{
+		5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+		EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
+				DELTA(8, 2)),
+	},	/* r: 7, 30, 5		d: 8^7, 8^7 */
+	{
+		6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+				DELTA(8, 2), DELTA(6, 1)),
+	},	/* r: 7, 30, 5		d: 8^7, 8^7, 6^5 */
+	{
+		8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
+				DELTA(8, 3), DELTA(6, 1)),
+	},	/* r: 7, 30, 5		d: 8^7, 8^7, 8^7, 6^5 */
+	{
+		8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+				DELTA(8, 2), DELTA(6, 1)),
+	},	/* r: 7, 30, 5		d: 8^7, 8^7, 6^5 */
+	{
+		8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
+				DELTA(8, 1), DELTA(6, 1)),
+	},	/* r: 7, 30, 5		d: 8^7, 6^5 */
+	{
+		8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
+				DELTA(6, 1)),
+	},	/* r: 7, 30, 5		d: 6^5 */
+	{
+		8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
+				DELTA(6, 1), DELTA(8, 1)),
+	},	/* r: 7, 30, 5		d: 6^5, 8^5 */
+	{
+		7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+		EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
+				DELTA(6, 1), DELTA(8, 1)),
+	},	/* r: 30, 5		d: 6^5, 8^5 */
+	{
+		30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+		EXPECT_STATS(3, ROOT(5, 1, 3),
+				DELTA(6, 1), DELTA(8, 1)),
+	},	/* r: 5			d: 6^5, 8^5 */
+	{
+		5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+		EXPECT_STATS(3, ROOT(5, 0, 2),
+				DELTA(6, 1), DELTA(8, 1)),
+	},	/* r:			d: 6^5, 8^5 */
+	{
+		6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+		EXPECT_STATS(2, ROOT(5, 0, 1),
+				DELTA(8, 1)),
+	},	/* r:			d: 6^5 */
+	{
+		8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+		EXPECT_STATS(0, ),
+	},	/* r:			d: */
+};
+
+static int check_expect(struct world *world,
+			const struct action_item *action_item,
+			unsigned int orig_delta_count,
+			unsigned int orig_root_count)
+{
+	unsigned int key_id = action_item->key_id;
+
+	switch (action_item->expect_delta) {
+	case EXPECT_DELTA_SAME:
+		if (orig_delta_count != world->delta_count) {
+			pr_err("Key %u: Delta count changed while expected to remain the same.\n",
+			       key_id);
+			return -EINVAL;
+		}
+		break;
+	case EXPECT_DELTA_INC:
+		if (WARN_ON(action_item->action == ACTION_PUT))
+			return -EINVAL;
+		if (orig_delta_count + 1 != world->delta_count) {
+			pr_err("Key %u: Delta count was not incremented.\n",
+			       key_id);
+			return -EINVAL;
+		}
+		break;
+	case EXPECT_DELTA_DEC:
+		if (WARN_ON(action_item->action == ACTION_GET))
+			return -EINVAL;
+		if (orig_delta_count - 1 != world->delta_count) {
+			pr_err("Key %u: Delta count was not decremented.\n",
+			       key_id);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	switch (action_item->expect_root) {
+	case EXPECT_ROOT_SAME:
+		if (orig_root_count != world->root_count) {
+			pr_err("Key %u: Root count changed while expected to remain the same.\n",
+			       key_id);
+			return -EINVAL;
+		}
+		break;
+	case EXPECT_ROOT_INC:
+		if (WARN_ON(action_item->action == ACTION_PUT))
+			return -EINVAL;
+		if (orig_root_count + 1 != world->root_count) {
+			pr_err("Key %u: Root count was not incremented.\n",
+			       key_id);
+			return -EINVAL;
+		}
+		break;
+	case EXPECT_ROOT_DEC:
+		if (WARN_ON(action_item->action == ACTION_GET))
+			return -EINVAL;
+		if (orig_root_count - 1 != world->root_count) {
+			pr_err("Key %u: Root count was not decremented.\n",
+			       key_id);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
+{
+	const struct tokey *root_key;
+	const struct delta *delta;
+	unsigned int key_id;
+
+	root_key = objagg_obj_root_priv(objagg_obj);
+	key_id = root_key->id;
+	delta = objagg_obj_delta_priv(objagg_obj);
+	if (delta)
+		key_id += delta->key_id_diff;
+	return key_id;
+}
+
+static int
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
+			const struct expect_stats_info *expect_stats_info,
+			const char **errmsg)
+{
+	if (stats_info->is_root != expect_stats_info->is_root) {
+		if (errmsg)
+			*errmsg = "Incorrect root/delta indication";
+		return -EINVAL;
+	}
+	if (stats_info->stats.user_count !=
+	    expect_stats_info->stats.user_count) {
+		if (errmsg)
+			*errmsg = "Incorrect user count";
+		return -EINVAL;
+	}
+	if (stats_info->stats.delta_user_count !=
+	    expect_stats_info->stats.delta_user_count) {
+		if (errmsg)
+			*errmsg = "Incorrect delta user count";
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
+			  const struct expect_stats_info *expect_stats_info,
+			  const char **errmsg)
+{
+	if (obj_to_key_id(stats_info->objagg_obj) !=
+	    expect_stats_info->key_id) {
+		if (errmsg)
+			*errmsg = "incorrect key id";
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int check_expect_stats_neigh(const struct objagg_stats *stats,
+				    const struct expect_stats *expect_stats,
+				    int pos)
+{
+	int i;
+	int err;
+
+	for (i = pos - 1; i >= 0; i--) {
+		err = check_expect_stats_nums(&stats->stats_info[i],
+					      &expect_stats->info[pos], NULL);
+		if (err)
+			break;
+		err = check_expect_stats_key_id(&stats->stats_info[i],
+						&expect_stats->info[pos], NULL);
+		if (!err)
+			return 0;
+	}
+	for (i = pos + 1; i < stats->stats_info_count; i++) {
+		err = check_expect_stats_nums(&stats->stats_info[i],
+					      &expect_stats->info[pos], NULL);
+		if (err)
+			break;
+		err = check_expect_stats_key_id(&stats->stats_info[i],
+						&expect_stats->info[pos], NULL);
+		if (!err)
+			return 0;
+	}
+	return -EINVAL;
+}
+
+static int __check_expect_stats(const struct objagg_stats *stats,
+				const struct expect_stats *expect_stats,
+				const char **errmsg)
+{
+	int i;
+	int err;
+
+	if (stats->stats_info_count != expect_stats->info_count) {
+		*errmsg = "Unexpected object count";
+		return -EINVAL;
+	}
+
+	for (i = 0; i < stats->stats_info_count; i++) {
+		err = check_expect_stats_nums(&stats->stats_info[i],
+					      &expect_stats->info[i], errmsg);
+		if (err)
+			return err;
+		err = check_expect_stats_key_id(&stats->stats_info[i],
+						&expect_stats->info[i], errmsg);
+		if (err) {
+			/* It is possible that one of the neighbor stats with
+			 * the same numbers has the correct key id, so check it
+			 */
+			err = check_expect_stats_neigh(stats, expect_stats, i);
+			if (err)
+				return err;
+		}
+	}
+	return 0;
+}
+
+static int check_expect_stats(struct objagg *objagg,
+			      const struct expect_stats *expect_stats,
+			      const char **errmsg)
+{
+	const struct objagg_stats *stats;
+	int err;
+
+	stats = objagg_stats_get(objagg);
+	if (IS_ERR(stats))
+		return PTR_ERR(stats);
+	err = __check_expect_stats(stats, expect_stats, errmsg);
+	objagg_stats_put(stats);
+	return err;
+}
+
+static int test_delta_action_item(struct world *world,
+				  struct objagg *objagg,
+				  const struct action_item *action_item,
+				  bool inverse)
+{
+	unsigned int orig_delta_count = world->delta_count;
+	unsigned int orig_root_count = world->root_count;
+	unsigned int key_id = action_item->key_id;
+	enum action action = action_item->action;
+	struct objagg_obj *objagg_obj;
+	const char *errmsg;
+	int err;
+
+	if (inverse)
+		action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
+
+	switch (action) {
+	case ACTION_GET:
+		objagg_obj = world_obj_get(world, objagg, key_id);
+		if (IS_ERR(objagg_obj))
+			return PTR_ERR(objagg_obj);
+		break;
+	case ACTION_PUT:
+		world_obj_put(world, objagg, key_id);
+		break;
+	}
+
+	if (inverse)
+		return 0;
+	err = check_expect(world, action_item,
+			   orig_delta_count, orig_root_count);
+	if (err)
+		goto errout;
+
+	errmsg = NULL;
+	err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
+	if (err) {
+		pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
+		goto errout;
+	}
+
+	return 0;
+
+errout:
+	/* This can only happen when the action is not inverted.
+	 * So in case of an error, clean up by doing the inverse action.
+	 */
+	test_delta_action_item(world, objagg, action_item, true);
+	return err;
+}
+
+static int test_delta(void)
+{
+	struct world world = {};
+	struct objagg *objagg;
+	int i;
+	int err;
+
+	objagg = objagg_create(&delta_ops, &world);
+	if (IS_ERR(objagg))
+		return PTR_ERR(objagg);
+
+	for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+		err = test_delta_action_item(&world, objagg,
+					     &action_items[i], false);
+		if (err)
+			goto err_do_action_item;
+	}
+
+	objagg_destroy(objagg);
+	return 0;
+
+err_do_action_item:
+	for (i--; i >= 0; i--)
+		test_delta_action_item(&world, objagg, &action_items[i], true);
+
+	objagg_destroy(objagg);
+	return err;
+}
+
+static int __init test_objagg_init(void)
+{
+	int err;
+
+	err = test_nodelta();
+	if (err)
+		return err;
+	return test_delta();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5e99504..aef1a97 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -330,6 +330,7 @@ static void vlan_transfer_features(struct net_device *dev,
 
 	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
+	vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);
 
 	netdev_update_features(vlandev);
 }
@@ -647,93 +648,6 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 	return err;
 }
 
-static struct sk_buff *vlan_gro_receive(struct list_head *head,
-					struct sk_buff *skb)
-{
-	const struct packet_offload *ptype;
-	unsigned int hlen, off_vlan;
-	struct sk_buff *pp = NULL;
-	struct vlan_hdr *vhdr;
-	struct sk_buff *p;
-	__be16 type;
-	int flush = 1;
-
-	off_vlan = skb_gro_offset(skb);
-	hlen = off_vlan + sizeof(*vhdr);
-	vhdr = skb_gro_header_fast(skb, off_vlan);
-	if (skb_gro_header_hard(skb, hlen)) {
-		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
-		if (unlikely(!vhdr))
-			goto out;
-	}
-
-	type = vhdr->h_vlan_encapsulated_proto;
-
-	rcu_read_lock();
-	ptype = gro_find_receive_by_type(type);
-	if (!ptype)
-		goto out_unlock;
-
-	flush = 0;
-
-	list_for_each_entry(p, head, list) {
-		struct vlan_hdr *vhdr2;
-
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
-
-		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
-		if (compare_vlan_header(vhdr, vhdr2))
-			NAPI_GRO_CB(p)->same_flow = 0;
-	}
-
-	skb_gro_pull(skb, sizeof(*vhdr));
-	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
-
-out_unlock:
-	rcu_read_unlock();
-out:
-	skb_gro_flush_final(skb, pp, flush);
-
-	return pp;
-}
-
-static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
-{
-	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
-	__be16 type = vhdr->h_vlan_encapsulated_proto;
-	struct packet_offload *ptype;
-	int err = -ENOENT;
-
-	rcu_read_lock();
-	ptype = gro_find_complete_by_type(type);
-	if (ptype)
-		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
-
-	rcu_read_unlock();
-	return err;
-}
-
-static struct packet_offload vlan_packet_offloads[] __read_mostly = {
-	{
-		.type = cpu_to_be16(ETH_P_8021Q),
-		.priority = 10,
-		.callbacks = {
-			.gro_receive = vlan_gro_receive,
-			.gro_complete = vlan_gro_complete,
-		},
-	},
-	{
-		.type = cpu_to_be16(ETH_P_8021AD),
-		.priority = 10,
-		.callbacks = {
-			.gro_receive = vlan_gro_receive,
-			.gro_complete = vlan_gro_complete,
-		},
-	},
-};
-
 static int __net_init vlan_init_net(struct net *net)
 {
 	struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -761,7 +675,6 @@ static struct pernet_operations vlan_net_ops = {
 static int __init vlan_proto_init(void)
 {
 	int err;
-	unsigned int i;
 
 	pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
@@ -785,9 +698,6 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err5;
 
-	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
-		dev_add_offload(&vlan_packet_offloads[i]);
-
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -805,13 +715,8 @@ static int __init vlan_proto_init(void)
 
 static void __exit vlan_cleanup_module(void)
 {
-	unsigned int i;
-
 	vlan_ioctl_set(NULL);
 
-	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
-		dev_remove_offload(&vlan_packet_offloads[i]);
-
 	vlan_netlink_fini();
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 44df1c3..c46daf0 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -92,6 +92,18 @@ static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
 	return NULL;
 }
 
+static inline netdev_features_t vlan_tnl_features(struct net_device *real_dev)
+{
+	netdev_features_t ret;
+
+	ret = real_dev->hw_enc_features &
+	      (NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO | NETIF_F_GSO_ENCAP_ALL);
+
+	if ((ret & NETIF_F_GSO_ENCAP_ALL) && (ret & NETIF_F_CSUM_MASK))
+		return (ret & ~NETIF_F_CSUM_MASK) | NETIF_F_HW_CSUM;
+	return 0;
+}
+
 #define vlan_group_for_each_dev(grp, i, dev) \
 	for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
 		if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4f60e86..a313165 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -57,7 +57,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 	}
 
 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
-	skb->vlan_tci = 0;
+	__vlan_hwaccel_clear_tag(skb);
 
 	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
 
@@ -223,6 +223,33 @@ static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vi
 		return -ENODEV;
 }
 
+int vlan_for_each(struct net_device *dev,
+		  int (*action)(struct net_device *dev, int vid, void *arg),
+		  void *arg)
+{
+	struct vlan_vid_info *vid_info;
+	struct vlan_info *vlan_info;
+	struct net_device *vdev;
+	int ret;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
+		return 0;
+
+	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
+					     vid_info->vid);
+		ret = action(vdev, vid_info->vid, arg);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(vlan_for_each);
+
 int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
 {
 	struct net_device *real_dev = vlan_info->real_dev;
@@ -426,3 +453,102 @@ bool vlan_uses_dev(const struct net_device *dev)
 	return vlan_info->grp.nr_vlan_devs ? true : false;
 }
 EXPORT_SYMBOL(vlan_uses_dev);
+
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+					struct sk_buff *skb)
+{
+	const struct packet_offload *ptype;
+	unsigned int hlen, off_vlan;
+	struct sk_buff *pp = NULL;
+	struct vlan_hdr *vhdr;
+	struct sk_buff *p;
+	__be16 type;
+	int flush = 1;
+
+	off_vlan = skb_gro_offset(skb);
+	hlen = off_vlan + sizeof(*vhdr);
+	vhdr = skb_gro_header_fast(skb, off_vlan);
+	if (skb_gro_header_hard(skb, hlen)) {
+		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+		if (unlikely(!vhdr))
+			goto out;
+	}
+
+	type = vhdr->h_vlan_encapsulated_proto;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (!ptype)
+		goto out_unlock;
+
+	flush = 0;
+
+	list_for_each_entry(p, head, list) {
+		struct vlan_hdr *vhdr2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+		if (compare_vlan_header(vhdr, vhdr2))
+			NAPI_GRO_CB(p)->same_flow = 0;
+	}
+
+	skb_gro_pull(skb, sizeof(*vhdr));
+	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	skb_gro_flush_final(skb, pp, flush);
+
+	return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+	__be16 type = vhdr->h_vlan_encapsulated_proto;
+	struct packet_offload *ptype;
+	int err = -ENOENT;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+	rcu_read_unlock();
+	return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+	{
+		.type = cpu_to_be16(ETH_P_8021Q),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+	{
+		.type = cpu_to_be16(ETH_P_8021AD),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+};
+
+static int __init vlan_offload_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+		dev_add_offload(&vlan_packet_offloads[i]);
+
+	return 0;
+}
+
+fs_initcall(vlan_offload_init);
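
As a hedged illustration of the new vlan_for_each() helper exported above, a driver might re-program its hardware VLAN filters like this; struct my_hw and my_hw_vlan_add() are hypothetical driver-side names.

/* Hedged illustration only: walk every VLAN stacked on "dev" and push its
 * VID to hypothetical hardware. vlan_for_each() asserts RTNL, so the caller
 * must hold it.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

struct my_hw;					/* hypothetical driver context */
int my_hw_vlan_add(struct my_hw *hw, int vid);	/* hypothetical helper */

static int my_restore_one_vlan(struct net_device *vdev, int vid, void *arg)
{
	struct my_hw *hw = arg;

	return my_hw_vlan_add(hw, vid);
}

static int my_restore_vlans(struct net_device *dev, struct my_hw *hw)
{
	ASSERT_RTNL();

	return vlan_for_each(dev, my_restore_one_vlan, hw);
}
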
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ff720f1..b2d9c8f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -562,6 +562,7 @@ static int vlan_dev_init(struct net_device *dev)
 
 	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
 			   NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+			   NETIF_F_GSO_ENCAP_ALL |
 			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
 			   NETIF_F_ALL_FCOE;
 
@@ -572,6 +573,7 @@ static int vlan_dev_init(struct net_device *dev)
 		netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
 
 	dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+	dev->hw_enc_features = vlan_tnl_features(real_dev);
 
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index f75816f..c386e69 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -22,7 +22,6 @@
 config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
 	depends on NET
-	select CRC16
 	select LIBCRC32C
 	help
           B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -48,6 +47,7 @@
 config BATMAN_ADV_BLA
 	bool "Bridge Loop Avoidance"
 	depends on BATMAN_ADV && INET
+	select CRC16
 	default y
 	help
 	  This option enables BLA (Bridge Loop Avoidance), a mechanism
@@ -82,6 +82,7 @@
 config BATMAN_ADV_MCAST
 	bool "Multicast optimisation"
 	depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
+	default y
 	help
 	  This option enables the multicast optimisation which aims to
 	  reduce the air overhead while improving the reliability of
@@ -100,12 +101,13 @@
 
 config BATMAN_ADV_DEBUG
 	bool "B.A.T.M.A.N. debugging"
-	depends on BATMAN_ADV_DEBUGFS
+	depends on BATMAN_ADV
 	help
 	  This is an option for use by developers; most people should
 	  say N here. This enables compilation of support for
-	  outputting debugging information to the kernel log. The
-	  output is controlled via the module parameter debug.
+	  outputting debugging information to the debugfs log or tracing
+	  buffer. The output is controlled via the batadv netdev specific
+	  log_level setting.
 
 config BATMAN_ADV_TRACING
 	bool "B.A.T.M.A.N. tracing support"
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index d222709..f97e566 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
-#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -2585,13 +2584,14 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
  * batadv_iv_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
  * @gw_node: Gateway to be dumped
  *
  * Return: Error code, or 0 on success
  */
-static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid,
+				   struct netlink_callback *cb,
 				   struct batadv_priv *bat_priv,
 				   struct batadv_gw_node *gw_node)
 {
@@ -2611,13 +2611,16 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_GATEWAYS);
 	if (!hdr) {
 		ret = -ENOBUFS;
 		goto out;
 	}
 
+	genl_dump_check_consistent(cb, hdr);
+
 	ret = -EMSGSIZE;
 
 	if (curr_gw == gw_node)
@@ -2668,13 +2671,15 @@ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
 	int idx_skip = cb->args[0];
 	int idx = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+	spin_lock_bh(&bat_priv->gw.list_lock);
+	cb->seq = bat_priv->gw.generation << 1 | 1;
+
+	hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
 		if (idx++ < idx_skip)
 			continue;
 
-		if (batadv_iv_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
-					    bat_priv, gw_node)) {
+		if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv,
+					    gw_node)) {
 			idx_skip = idx - 1;
 			goto unlock;
 		}
@@ -2682,7 +2687,7 @@ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 	idx_skip = idx;
 unlock:
-	rcu_read_unlock();
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	cb->args[0] = idx_skip;
 }
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 6baec4e..90e33f8 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -27,11 +27,13 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
+#include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/seq_file.h>
+#include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -915,13 +917,14 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
  * batadv_v_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
  * @gw_node: Gateway to be dumped
  *
  * Return: Error code, or 0 on success
  */
-static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid,
+				  struct netlink_callback *cb,
 				  struct batadv_priv *bat_priv,
 				  struct batadv_gw_node *gw_node)
 {
@@ -941,13 +944,16 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_GATEWAYS);
 	if (!hdr) {
 		ret = -ENOBUFS;
 		goto out;
 	}
 
+	genl_dump_check_consistent(cb, hdr);
+
 	ret = -EMSGSIZE;
 
 	if (curr_gw == gw_node) {
@@ -1018,13 +1024,15 @@ static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
 	int idx_skip = cb->args[0];
 	int idx = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+	spin_lock_bh(&bat_priv->gw.list_lock);
+	cb->seq = bat_priv->gw.generation << 1 | 1;
+
+	hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
 		if (idx++ < idx_skip)
 			continue;
 
-		if (batadv_v_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
-					   bat_priv, gw_node)) {
+		if (batadv_v_gw_dump_entry(msg, portid, cb, bat_priv,
+					   gw_node)) {
 			idx_skip = idx - 1;
 			goto unlock;
 		}
@@ -1032,7 +1040,7 @@ static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 	idx_skip = idx;
 unlock:
-	rcu_read_unlock();
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	cb->args[0] = idx_skip;
 }
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5f1aeed..5fdde29 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2094,14 +2094,15 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
  * @claim: entry to dump
  *
  * Return: 0 or error code.
  */
 static int
-batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+			    struct netlink_callback *cb,
 			    struct batadv_hard_iface *primary_if,
 			    struct batadv_bla_claim *claim)
 {
@@ -2111,13 +2112,16 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	void *hdr;
 	int ret = -EINVAL;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_BLA_CLAIM);
 	if (!hdr) {
 		ret = -ENOBUFS;
 		goto out;
 	}
 
+	genl_dump_check_consistent(cb, hdr);
+
 	is_own = batadv_compare_eth(claim->backbone_gw->orig,
 				    primary_addr);
 
@@ -2153,28 +2157,33 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
- * @head: bucket to dump
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_skip: How many entries to skip
  *
  * Return: always 0.
  */
 static int
-batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
+			     struct netlink_callback *cb,
 			     struct batadv_hard_iface *primary_if,
-			     struct hlist_head *head, int *idx_skip)
+			     struct batadv_hashtable *hash, unsigned int bucket,
+			     int *idx_skip)
 {
 	struct batadv_bla_claim *claim;
 	int idx = 0;
 	int ret = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(claim, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
 
-		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+		ret = batadv_bla_claim_dump_entry(msg, portid, cb,
 						  primary_if, claim);
 		if (ret) {
 			*idx_skip = idx - 1;
@@ -2184,7 +2193,7 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 
 	*idx_skip = 0;
 unlock:
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 	return ret;
 }
 
@@ -2204,7 +2213,6 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_hashtable *hash;
 	struct batadv_priv *bat_priv;
 	int bucket = cb->args[0];
-	struct hlist_head *head;
 	int idx = cb->args[1];
 	int ifindex;
 	int ret = 0;
@@ -2230,11 +2238,8 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	}
 
 	while (bucket < hash->size) {
-		head = &hash->table[bucket];
-
-		if (batadv_bla_claim_dump_bucket(msg, portid,
-						 cb->nlh->nlmsg_seq,
-						 primary_if, head, &idx))
+		if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
+						 hash, bucket, &idx))
 			break;
 		bucket++;
 	}
@@ -2325,14 +2330,15 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
  *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
  * @backbone_gw: entry to dump
  *
  * Return: 0 or error code.
  */
 static int
-batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
+			       struct netlink_callback *cb,
 			       struct batadv_hard_iface *primary_if,
 			       struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -2343,13 +2349,16 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	void *hdr;
 	int ret = -EINVAL;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_BLA_BACKBONE);
 	if (!hdr) {
 		ret = -ENOBUFS;
 		goto out;
 	}
 
+	genl_dump_check_consistent(cb, hdr);
+
 	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
 
 	spin_lock_bh(&backbone_gw->crc_lock);
@@ -2386,28 +2395,33 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
- * @head: bucket to dump
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_skip: How many entries to skip
  *
  * Return: always 0.
  */
 static int
-batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
+				struct netlink_callback *cb,
 				struct batadv_hard_iface *primary_if,
-				struct hlist_head *head, int *idx_skip)
+				struct batadv_hashtable *hash,
+				unsigned int bucket, int *idx_skip)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
 	int ret = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
 
-		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
 						     primary_if, backbone_gw);
 		if (ret) {
 			*idx_skip = idx - 1;
@@ -2417,7 +2431,7 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 
 	*idx_skip = 0;
 unlock:
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 	return ret;
 }
 
@@ -2437,7 +2451,6 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_hashtable *hash;
 	struct batadv_priv *bat_priv;
 	int bucket = cb->args[0];
-	struct hlist_head *head;
 	int idx = cb->args[1];
 	int ifindex;
 	int ret = 0;
@@ -2463,11 +2476,8 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	}
 
 	while (bucket < hash->size) {
-		head = &hash->table[bucket];
-
-		if (batadv_bla_backbone_dump_bucket(msg, portid,
-						    cb->nlh->nlmsg_seq,
-						    primary_if, head, &idx))
+		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
+						    hash, bucket, &idx))
 			break;
 		bucket++;
 	}
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 8b608a2..d4a7702 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -19,6 +19,7 @@
 #include "debugfs.h"
 #include "main.h"
 
+#include <asm/current.h>
 #include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/err.h>
@@ -27,6 +28,7 @@
 #include <linux/fs.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/stddef.h>
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index a60bacf..b9ffe18 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -863,23 +863,27 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
  *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @dat_entry: entry to dump
  *
  * Return: 0 or error code.
  */
 static int
-batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid,
+			    struct netlink_callback *cb,
 			    struct batadv_dat_entry *dat_entry)
 {
 	int msecs;
 	void *hdr;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_DAT_CACHE);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_DAT_CACHE);
 	if (!hdr)
 		return -ENOBUFS;
 
+	genl_dump_check_consistent(cb, hdr);
+
 	msecs = jiffies_to_msecs(jiffies - dat_entry->last_update);
 
 	if (nla_put_in_addr(msg, BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
@@ -901,27 +905,31 @@ batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
- * @head: bucket to dump
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_skip: How many entries to skip
  *
  * Return: 0 or error code.
  */
 static int
-batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
-			     struct hlist_head *head, int *idx_skip)
+batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid,
+			     struct netlink_callback *cb,
+			     struct batadv_hashtable *hash, unsigned int bucket,
+			     int *idx_skip)
 {
 	struct batadv_dat_entry *dat_entry;
 	int idx = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
 		if (idx < *idx_skip)
 			goto skip;
 
-		if (batadv_dat_cache_dump_entry(msg, portid, seq,
-						dat_entry)) {
-			rcu_read_unlock();
+		if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
+			spin_unlock_bh(&hash->list_locks[bucket]);
 			*idx_skip = idx;
 
 			return -EMSGSIZE;
@@ -930,7 +938,7 @@ batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 skip:
 		idx++;
 	}
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 
 	return 0;
 }
@@ -951,7 +959,6 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_hashtable *hash;
 	struct batadv_priv *bat_priv;
 	int bucket = cb->args[0];
-	struct hlist_head *head;
 	int idx = cb->args[1];
 	int ifindex;
 	int ret = 0;
@@ -977,10 +984,7 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	}
 
 	while (bucket < hash->size) {
-		head = &hash->table[bucket];
-
-		if (batadv_dat_cache_dump_bucket(msg, portid,
-						 cb->nlh->nlmsg_seq, head,
+		if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
 						 &idx))
 			break;
 
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 140c61a..9d8e5ed 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -377,6 +377,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 
 	kref_get(&gw_node->refcount);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
+	bat_priv->gw.generation++;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -472,6 +473,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 		if (!hlist_unhashed(&gw_node->list)) {
 			hlist_del_init_rcu(&gw_node->list);
 			batadv_gw_node_put(gw_node);
+			bat_priv->gw.generation++;
 		}
 		spin_unlock_bh(&bat_priv->gw.list_lock);
 
@@ -518,6 +520,7 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
 				  &bat_priv->gw.gateway_list, list) {
 		hlist_del_init_rcu(&gw_node->list);
 		batadv_gw_node_put(gw_node);
+		bat_priv->gw.generation++;
 	}
 	spin_unlock_bh(&bat_priv->gw.list_lock);
 }
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 781c5b6..508f441 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -951,6 +951,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	batadv_check_known_mac_addr(hard_iface->net_dev);
 	kref_get(&hard_iface->refcount);
 	list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
+	batadv_hardif_generation++;
 
 	return hard_iface;
 
@@ -993,6 +994,7 @@ void batadv_hardif_remove_interfaces(void)
 	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
 				 &batadv_hardif_list, list) {
 		list_del_rcu(&hard_iface->list);
+		batadv_hardif_generation++;
 		batadv_hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
@@ -1054,6 +1056,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
 	case NETDEV_UNREGISTER:
 	case NETDEV_PRE_TYPE_CHANGE:
 		list_del_rcu(&hard_iface->list);
+		batadv_hardif_generation++;
 
 		batadv_hardif_remove_interface(hard_iface);
 		break;
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 7b49e40..9194f4d 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -32,6 +32,8 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
 		INIT_HLIST_HEAD(&hash->table[i]);
 		spin_lock_init(&hash->list_locks[i]);
 	}
+
+	atomic_set(&hash->generation, 0);
 }
 
 /**
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 9490a7c..0e36fa1 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -21,6 +21,7 @@
 
 #include "main.h"
 
+#include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
@@ -58,6 +59,9 @@ struct batadv_hashtable {
 
 	/** @size: size of hashtable */
 	u32 size;
+
+	/** @generation: current (generation) sequence number */
+	atomic_t generation;
 };
 
 /* allocates and clears the hash */
@@ -112,6 +116,7 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash,
 
 	/* no duplicate found in list, add new element */
 	hlist_add_head_rcu(data_node, head);
+	atomic_inc(&hash->generation);
 
 	ret = 0;
 
@@ -154,6 +159,7 @@ static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
 
 		data_save = node;
 		hlist_del_rcu(node);
+		atomic_inc(&hash->generation);
 		break;
 	}
 	spin_unlock_bh(&hash->list_locks[index]);
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 6beb5f0..02e55b7 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -43,6 +43,8 @@
 #include "debugfs.h"
 #include "trace.h"
 
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
 #define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
 
 static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
@@ -92,33 +94,6 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
 	return 0;
 }
 
-/**
- * batadv_debug_log() - Add debug log entry
- * @bat_priv: the bat priv with all the soft interface information
- * @fmt: format string
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
-			  jiffies_to_msecs(jiffies), &vaf);
-
-	trace_batadv_dbg(bat_priv, &vaf);
-
-	va_end(args);
-
-	return 0;
-}
-
 static int batadv_log_open(struct inode *inode, struct file *file)
 {
 	if (!try_module_get(THIS_MODULE))
@@ -259,3 +234,34 @@ void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 	kfree(bat_priv->debug_log);
 	bat_priv->debug_log = NULL;
 }
+
+#endif /* CONFIG_BATMAN_ADV_DEBUGFS */
+
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
+			  jiffies_to_msecs(jiffies), &vaf);
+#endif
+
+	trace_batadv_dbg(bat_priv, &vaf);
+
+	va_end(args);
+
+	return 0;
+}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 69c0d85..d1ed839 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -74,6 +74,7 @@
  * list traversals just rcu-locked
  */
 struct list_head batadv_hardif_list;
+unsigned int batadv_hardif_generation;
 static int (*batadv_rx_handler[256])(struct sk_buff *skb,
 				     struct batadv_hard_iface *recv_if);
 
@@ -186,6 +187,8 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
 	INIT_HLIST_HEAD(&bat_priv->tp_list);
 
+	bat_priv->gw.generation = 0;
+
 	ret = batadv_v_mesh_init(bat_priv);
 	if (ret < 0)
 		goto err;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2002b70..b572066 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.4"
+#define BATADV_SOURCE_VERSION "2019.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -247,6 +247,7 @@ static inline int batadv_print_vid(unsigned short vid)
 }
 
 extern struct list_head batadv_hardif_list;
+extern unsigned int batadv_hardif_generation;
 
 extern unsigned char batadv_broadcast_addr[];
 extern struct workqueue_struct *batadv_event_workqueue;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 86725d7..69244e4 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1365,22 +1365,26 @@ int batadv_mcast_mesh_info_put(struct sk_buff *msg,
  *  to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @orig_node: originator to dump the multicast flags of
  *
  * Return: 0 or error code.
  */
 static int
-batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
+			      struct netlink_callback *cb,
 			      struct batadv_orig_node *orig_node)
 {
 	void *hdr;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_MCAST_FLAGS);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_MCAST_FLAGS);
 	if (!hdr)
 		return -ENOBUFS;
 
+	genl_dump_check_consistent(cb, hdr);
+
 	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
 		    orig_node->orig)) {
 		genlmsg_cancel(msg, hdr);
@@ -1405,21 +1409,26 @@ batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  *  table to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
- * @head: bucket to dump
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_skip: How many entries to skip
  *
  * Return: 0 or error code.
  */
 static int
-batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
-			       struct hlist_head *head, long *idx_skip)
+batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
+			       struct netlink_callback *cb,
+			       struct batadv_hashtable *hash,
+			       unsigned int bucket, long *idx_skip)
 {
 	struct batadv_orig_node *orig_node;
 	long idx = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
 		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
 			      &orig_node->capa_initialized))
 			continue;
@@ -1427,9 +1436,8 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 		if (idx < *idx_skip)
 			goto skip;
 
-		if (batadv_mcast_flags_dump_entry(msg, portid, seq,
-						  orig_node)) {
-			rcu_read_unlock();
+		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
+			spin_unlock_bh(&hash->list_locks[bucket]);
 			*idx_skip = idx;
 
 			return -EMSGSIZE;
@@ -1438,7 +1446,7 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 skip:
 		idx++;
 	}
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 
 	return 0;
 }
@@ -1447,7 +1455,7 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
  * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @bat_priv: the bat priv with all the soft interface information
  * @bucket: current bucket to dump
  * @idx: index in current bucket to the next entry to dump
@@ -1455,19 +1463,17 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
  * Return: 0 or error code.
  */
 static int
-__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, u32 seq,
+__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
+			  struct netlink_callback *cb,
 			  struct batadv_priv *bat_priv, long *bucket, long *idx)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	long bucket_tmp = *bucket;
-	struct hlist_head *head;
 	long idx_tmp = *idx;
 
 	while (bucket_tmp < hash->size) {
-		head = &hash->table[bucket_tmp];
-
-		if (batadv_mcast_flags_dump_bucket(msg, portid, seq, head,
-						   &idx_tmp))
+		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
+						   bucket_tmp, &idx_tmp))
 			break;
 
 		bucket_tmp++;
@@ -1550,8 +1556,7 @@ int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
 		return ret;
 
 	bat_priv = netdev_priv(primary_if->soft_iface);
-	ret = __batadv_mcast_flags_dump(msg, portid, cb->nlh->nlmsg_seq,
-					bat_priv, bucket, idx);
+	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
 
 	batadv_hardif_put(primary_if);
 	return ret;
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 0d9459b..2dc3304 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -29,11 +29,11 @@
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/printk.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
@@ -445,23 +445,27 @@ batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
  * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @hard_iface: Hard interface to dump
  *
  * Return: error code, or 0 on success
  */
 static int
-batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid,
+				 struct netlink_callback *cb,
 				 struct batadv_hard_iface *hard_iface)
 {
 	struct net_device *net_dev = hard_iface->net_dev;
 	void *hdr;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
 			  BATADV_CMD_GET_HARDIFS);
 	if (!hdr)
 		return -EMSGSIZE;
 
+	genl_dump_check_consistent(cb, hdr);
+
 	if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
 			net_dev->ifindex) ||
 	    nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
@@ -498,7 +502,6 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_hard_iface *hard_iface;
 	int ifindex;
 	int portid = NETLINK_CB(cb->skb).portid;
-	int seq = cb->nlh->nlmsg_seq;
 	int skip = cb->args[0];
 	int i = 0;
 
@@ -516,23 +519,24 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
 		return -ENODEV;
 	}
 
-	rcu_read_lock();
+	rtnl_lock();
+	cb->seq = batadv_hardif_generation << 1 | 1;
 
-	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
 		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
 		if (i++ < skip)
 			continue;
 
-		if (batadv_netlink_dump_hardif_entry(msg, portid, seq,
+		if (batadv_netlink_dump_hardif_entry(msg, portid, cb,
 						     hard_iface)) {
 			i--;
 			break;
 		}
 	}
 
-	rcu_read_unlock();
+	rtnl_unlock();
 
 	dev_put(soft_iface);
 
diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c
index 3d57f99..8e10242 100644
--- a/net/batman-adv/trace.c
+++ b/net/batman-adv/trace.c
@@ -16,7 +16,5 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/module.h>
-
 #define CREATE_TRACE_POINTS
 #include "trace.h"
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
index 3acda26..104784b 100644
--- a/net/batman-adv/trace.h
+++ b/net/batman-adv/trace.h
@@ -21,7 +21,13 @@
 
 #include "main.h"
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
 #include <linux/tracepoint.h>
+#include <linux/types.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM batadv
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d21624c..8dcd496 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1145,14 +1145,15 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
  * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
  * @common: tt local & tt global common data
  *
  * Return: Error code, or 0 on success
  */
 static int
-batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid,
+			   struct netlink_callback *cb,
 			   struct batadv_priv *bat_priv,
 			   struct batadv_tt_common_entry *common)
 {
@@ -1173,12 +1174,14 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 
 	batadv_softif_vlan_put(vlan);
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI,
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
 			  BATADV_CMD_GET_TRANSTABLE_LOCAL);
 	if (!hdr)
 		return -ENOBUFS;
 
+	genl_dump_check_consistent(cb, hdr);
+
 	if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
 	    nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
 	    nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
@@ -1201,34 +1204,39 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
- * @head: Pointer to the list containing the local tt entries
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_s: Number of entries to skip
  *
  * Return: Error code, or 0 on success
  */
 static int
-batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
+			    struct netlink_callback *cb,
 			    struct batadv_priv *bat_priv,
-			    struct hlist_head *head, int *idx_s)
+			    struct batadv_hashtable *hash, unsigned int bucket,
+			    int *idx_s)
 {
 	struct batadv_tt_common_entry *common;
 	int idx = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(common, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
 		if (idx++ < *idx_s)
 			continue;
 
-		if (batadv_tt_local_dump_entry(msg, portid, seq, bat_priv,
+		if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
 					       common)) {
-			rcu_read_unlock();
+			spin_unlock_bh(&hash->list_locks[bucket]);
 			*idx_s = idx - 1;
 			return -EMSGSIZE;
 		}
 	}
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 
 	*idx_s = 0;
 	return 0;
@@ -1248,7 +1256,6 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_priv *bat_priv;
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_hashtable *hash;
-	struct hlist_head *head;
 	int ret;
 	int ifindex;
 	int bucket = cb->args[0];
@@ -1276,10 +1283,8 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	hash = bat_priv->tt.local_hash;
 
 	while (bucket < hash->size) {
-		head = &hash->table[bucket];
-
-		if (batadv_tt_local_dump_bucket(msg, portid, cb->nlh->nlmsg_seq,
-						bat_priv, head, &idx))
+		if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
+						hash, bucket, &idx))
 			break;
 
 		bucket++;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 45b5592..cbe17da 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1096,12 +1096,15 @@ struct batadv_priv_gw {
 	/** @gateway_list: list of available gateway nodes */
 	struct hlist_head gateway_list;
 
-	/** @list_lock: lock protecting gateway_list & curr_gw */
+	/** @list_lock: lock protecting gateway_list, curr_gw, generation */
 	spinlock_t list_lock;
 
 	/** @curr_gw: pointer to currently selected gateway node */
 	struct batadv_gw_node __rcu *curr_gw;
 
+	/** @generation: current (generation) sequence number */
+	unsigned int generation;
+
 	/**
 	 * @mode: gateway operation: off, client or server (see batadv_gw_modes)
 	 */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 9b46d2d..d4863f56 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -741,3 +741,15 @@ void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
 	if (mask & BR_NEIGH_SUPPRESS)
 		br_recalculate_neigh_suppress_enabled(br);
 }
+
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+	struct net_bridge_port *p;
+
+	p = br_port_get_rtnl_rcu(dev);
+	if (!p)
+		return false;
+
+	return p->flags & flag;
+}
+EXPORT_SYMBOL_GPL(br_port_flag_is_set);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index b1b5e85..c9383c4 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -671,10 +671,8 @@ static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff
 		return 0;
 	}
 
-	if (data->vlan_tci) {
-		skb->vlan_tci = data->vlan_tci;
-		skb->vlan_proto = data->vlan_proto;
-	}
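+	/* vlan_proto is zero when the original skb carried no offloaded tag
+	 * (see br_nf_dev_queue_xmit), so only restore a tag that was saved
+	 */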
+	if (data->vlan_proto)
+		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
 
 	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
 	__skb_push(skb, data->encap_size);
@@ -740,8 +738,13 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
 
 		data = this_cpu_ptr(&brnf_frag_data_storage);
 
-		data->vlan_tci = skb->vlan_tci;
-		data->vlan_proto = skb->vlan_proto;
+		if (skb_vlan_tag_present(skb)) {
+			data->vlan_tci = skb->vlan_tci;
+			data->vlan_proto = skb->vlan_proto;
+		} else {
+			data->vlan_proto = 0;
+		}
+
 		data->encap_size = nf_bridge_encap_header_len(skb);
 		data->size = ETH_HLEN + data->encap_size;
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 04c19a3..bc26537 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -912,7 +912,7 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
 	int err = 0;
 
 	if (skb_vlan_tag_present(skb)) {
-		*vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+		*vid = skb_vlan_tag_get_id(skb);
 	} else {
 		*vid = 0;
 		err = -EINVAL;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index e84be08..b21838b 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -421,7 +421,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 	}
 
 	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 
 	if (p && (p->flags & BR_VLAN_TUNNEL) &&
 	    br_handle_egress_vlan_tunnel(skb, v)) {
@@ -494,8 +494,8 @@ static bool __allowed_ingress(const struct net_bridge *br,
 			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 		else
 			/* Priority-tagged Frame.
-			 * At this point, We know that skb->vlan_tci had
-			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+			 * At this point, we know that skb->vlan_tci VID
+			 * field was 0.
 			 * We update only VID field and preserve PCP field.
 			 */
 			skb->vlan_tci |= pvid;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 57f3a6fc..4bf62b1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -728,49 +728,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	return -EFAULT;
 }
 
-__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
-{
-	__sum16 sum;
-
-	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
-	if (likely(!sum)) {
-		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
-		    !skb->csum_complete_sw)
-			netdev_rx_csum_fault(skb->dev);
-	}
-	if (!skb_shared(skb))
-		skb->csum_valid = !sum;
-	return sum;
-}
-EXPORT_SYMBOL(__skb_checksum_complete_head);
-
-__sum16 __skb_checksum_complete(struct sk_buff *skb)
-{
-	__wsum csum;
-	__sum16 sum;
-
-	csum = skb_checksum(skb, 0, skb->len, 0);
-
-	/* skb->csum holds pseudo checksum */
-	sum = csum_fold(csum_add(skb->csum, csum));
-	if (likely(!sum)) {
-		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
-		    !skb->csum_complete_sw)
-			netdev_rx_csum_fault(skb->dev);
-	}
-
-	if (!skb_shared(skb)) {
-		/* Save full packet checksum */
-		skb->csum = csum;
-		skb->ip_summed = CHECKSUM_COMPLETE;
-		skb->csum_complete_sw = 1;
-		skb->csum_valid = !sum;
-	}
-
-	return sum;
-}
-EXPORT_SYMBOL(__skb_checksum_complete);
-
 /**
  *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
  *	@skb: skbuff
@@ -810,7 +767,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 		    !skb->csum_complete_sw)
-			netdev_rx_csum_fault(NULL);
+			netdev_rx_csum_fault(NULL, skb);
 	}
 	return 0;
 fault:
diff --git a/net/core/dev.c b/net/core/dev.c
index ddc551f..f69b2fc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3091,10 +3091,17 @@ EXPORT_SYMBOL(__skb_gso_segment);
 
 /* Take action when hardware reception checksum errors are detected. */
 #ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
 {
 	if (net_ratelimit()) {
 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+		if (dev)
+			pr_err("dev features: %pNF\n", &dev->features);
+		pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+		       skb->len, skb->data_len, skb->pkt_type,
+		       skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+		       skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+		       skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
 		dump_stack();
 	}
 }
@@ -4889,7 +4896,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
 		 * and set skb->priority like in vlan_do_receive()
 		 * For the time being, just ignore Priority Code Point
 		 */
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	}
 
 	type = skb->protocol;
@@ -5357,11 +5364,13 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
  */
 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
-	u32 i;
+	unsigned long bitmask = napi->gro_bitmask;
+	unsigned int i, base = ~0U;
 
-	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		if (test_bit(i, &napi->gro_bitmask))
-			__napi_gro_flush_chain(napi, i, flush_old);
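+	/* visit only the set bits: ffs() is 1-based, so shift the handled
+	 * bits away and track the absolute bucket index in 'base'
+	 */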
+	while ((i = ffs(bitmask)) != 0) {
+		bitmask >>= i;
+		base += i;
+		__napi_gro_flush_chain(napi, base, flush_old);
 	}
 }
 EXPORT_SYMBOL(napi_gro_flush);
@@ -5386,7 +5395,9 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
 		}
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
-		diffs |= p->vlan_tci ^ skb->vlan_tci;
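+		/* vlan_tci is only valid while a tag is present; a presence
+		 * mismatch alone already marks the entries as different
+		 */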
+		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+		if (skb_vlan_tag_present(p))
+			diffs |= p->vlan_tci ^ skb->vlan_tci;
 		diffs |= skb_metadata_dst_cmp(p, skb);
 		diffs |= skb_metadata_differs(p, skb);
 		if (maclen == ETH_HLEN)
@@ -5652,7 +5663,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	__skb_pull(skb, skb_headlen(skb));
 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
-	skb->vlan_tci = 0;
+	__vlan_hwaccel_clear_tag(skb);
 	skb->dev = napi->dev;
 	skb->skb_iif = 0;
 
@@ -5783,7 +5794,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 	if (likely(!sum)) {
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 		    !skb->csum_complete_sw)
-			netdev_rx_csum_fault(skb->dev);
+			netdev_rx_csum_fault(skb->dev, skb);
 	}
 
 	NAPI_GRO_CB(skb)->csum = wsum;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index d884d8f..81a8cd4e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -278,6 +278,103 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
 EXPORT_SYMBOL(__hw_addr_sync_dev);
 
 /**
+ *  __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
+ *  into account references
+ *  @list: address list to synchronize
+ *  @dev:  device to sync
+ *  @sync: function to call if address or reference on it should be added
+ *  @unsync: function to call if address or some reference on it should be removed
+ *
+ *  This function is intended to be called from the ndo_set_rx_mode
+ *  function of devices that require explicit notifications when addresses
+ *  or references on them are added or removed. The unsync function may be
+ *  NULL, in which case the addresses or references requiring removal will
+ *  simply be removed without any notification to the device. It is then
+ *  the driver's responsibility to identify such addresses or references
+ *  and distribute them between its internal address tables.
+ **/
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+			   struct net_device *dev,
+			   int (*sync)(struct net_device *,
+				       const unsigned char *, int),
+			   int (*unsync)(struct net_device *,
+					 const unsigned char *, int))
+{
+	struct netdev_hw_addr *ha, *tmp;
+	int err, ref_cnt;
+
+	/* first go through and flush out any unsynced/stale entries */
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+		/* sync if address is not used */
+		if ((ha->sync_cnt << 1) <= ha->refcount)
+			continue;
+
+		/* if fails defer unsyncing address */
+		ref_cnt = ha->refcount - ha->sync_cnt;
+		if (unsync && unsync(dev, ha->addr, ref_cnt))
+			continue;
+
+		ha->refcount = (ref_cnt << 1) + 1;
+		ha->sync_cnt = ref_cnt;
+		__hw_addr_del_entry(list, ha, false, false);
+	}
+
+	/* go through and sync updated/new entries to the list */
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+		/* sync if address added or reused */
+		if ((ha->sync_cnt << 1) >= ha->refcount)
+			continue;
+
+		ref_cnt = ha->refcount - ha->sync_cnt;
+		err = sync(dev, ha->addr, ref_cnt);
+		if (err)
+			return err;
+
+		ha->refcount = ref_cnt << 1;
+		ha->sync_cnt = ref_cnt;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
+
+/**
+ *  __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
+ *  it from device
+ *  @list: address list to remove synchronized addresses (references on it) from
+ *  @dev:  device to sync
+ *  @unsync: function to call if address and references on it should be removed
+ *
+ *  Remove all addresses that were added to the device by
+ *  __hw_addr_ref_sync_dev(). This function is intended to be called from the
+ *  ndo_stop or ndo_open functions of devices that require explicit
+ *  notifications when addresses (or references on them) are added or
+ *  removed. If the unsync function pointer
+ *  is NULL then this function can be used to just reset the sync_cnt for the
+ *  addresses in the list.
+ **/
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+			      struct net_device *dev,
+			      int (*unsync)(struct net_device *,
+					    const unsigned char *, int))
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+		if (!ha->sync_cnt)
+			continue;
+
+		/* if fails defer unsyncing address */
+		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
+			continue;
+
+		ha->refcount -= ha->sync_cnt - 1;
+		ha->sync_cnt = 0;
+		__hw_addr_del_entry(list, ha, false, false);
+	}
+}
+EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
+
+/**
  *  __hw_addr_unsync_dev - Remove synchronized addresses from device
  *  @list: address list to remove synchronized addresses from
  *  @dev:  device to sync
diff --git a/net/core/filter.c b/net/core/filter.c
index e521c5e..10acbc0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -296,22 +296,18 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 		break;
 
 	case SKF_AD_VLAN_TAG:
-	case SKF_AD_VLAN_TAG_PRESENT:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
 		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
 		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
 				      offsetof(struct sk_buff, vlan_tci));
-		if (skb_field == SKF_AD_VLAN_TAG) {
-			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
-						~VLAN_TAG_PRESENT);
-		} else {
-			/* dst_reg >>= 12 */
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
-			/* dst_reg &= 1 */
+		break;
+	case SKF_AD_VLAN_TAG_PRESENT:
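+		/* the tag-present flag is now a single bit of an skb byte:
+		 * shift it down to bit 0 and, unless it already is the top
+		 * bit, mask off the remaining bits
+		 */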
+		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+		if (PKT_VLAN_PRESENT_BIT)
+			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
+		if (PKT_VLAN_PRESENT_BIT < 7)
 			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
-		}
 		break;
 	}
 
@@ -6140,19 +6136,19 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct __sk_buff, vlan_present):
-	case offsetof(struct __sk_buff, vlan_tci):
-		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+		*target_size = 1;
+		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+				      PKT_VLAN_PRESENT_OFFSET());
+		if (PKT_VLAN_PRESENT_BIT)
+			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
+		if (PKT_VLAN_PRESENT_BIT < 7)
+			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+		break;
 
+	case offsetof(struct __sk_buff, vlan_tci):
 		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
 				      bpf_target_off(struct sk_buff, vlan_tci, 2,
 						     target_size));
-		if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
-			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
-						~VLAN_TAG_PRESENT);
-		} else {
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
-			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
-		}
 		break;
 
 	case offsetof(struct __sk_buff, cb[0]) ...
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 588f475..2e8d91e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -952,8 +952,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 
 			if (!vlan) {
 				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
-				key_vlan->vlan_priority =
-					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
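+				/* skb_vlan_tag_get_prio() already shifts by
+				 * VLAN_PRIO_SHIFT, so use its result directly
+				 */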
+				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
 			} else {
 				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
 					VLAN_VID_MASK;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33d9227..86f2d9c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2885,9 +2885,11 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
 }
 EXPORT_SYMBOL(rtnl_configure_link);
 
-struct net_device *rtnl_create_link(struct net *net,
-	const char *ifname, unsigned char name_assign_type,
-	const struct rtnl_link_ops *ops, struct nlattr *tb[])
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+				    unsigned char name_assign_type,
+				    const struct rtnl_link_ops *ops,
+				    struct nlattr *tb[],
+				    struct netlink_ext_ack *extack)
 {
 	struct net_device *dev;
 	unsigned int num_tx_queues = 1;
@@ -2903,11 +2905,15 @@ struct net_device *rtnl_create_link(struct net *net,
 	else if (ops->get_num_rx_queues)
 		num_rx_queues = ops->get_num_rx_queues();
 
-	if (num_tx_queues < 1 || num_tx_queues > 4096)
+	if (num_tx_queues < 1 || num_tx_queues > 4096) {
+		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
 		return ERR_PTR(-EINVAL);
+	}
 
-	if (num_rx_queues < 1 || num_rx_queues > 4096)
+	if (num_rx_queues < 1 || num_rx_queues > 4096) {
+		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
 		return ERR_PTR(-EINVAL);
+	}
 
 	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
 			       ops->setup, num_tx_queues, num_rx_queues);
@@ -3048,7 +3054,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
 				err = nla_parse_nested(attr, ops->maxtype,
 						       linkinfo[IFLA_INFO_DATA],
-						       ops->policy, NULL);
+						       ops->policy, extack);
 				if (err < 0)
 					return err;
 				data = attr;
@@ -3070,7 +3076,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 						       m_ops->slave_maxtype,
 						       linkinfo[IFLA_INFO_SLAVE_DATA],
 						       m_ops->slave_policy,
-						       NULL);
+						       extack);
 				if (err < 0)
 					return err;
 				slave_data = slave_attr;
@@ -3134,6 +3140,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 					goto replay;
 			}
 #endif
+			NL_SET_ERR_MSG(extack, "Unknown device type");
 			return -EOPNOTSUPP;
 		}
 
@@ -3154,6 +3161,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 			link_net = get_net_ns_by_id(dest_net, id);
 			if (!link_net) {
+				NL_SET_ERR_MSG(extack, "Unknown network namespace id");
 				err =  -EINVAL;
 				goto out;
 			}
@@ -3163,7 +3171,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 		}
 
 		dev = rtnl_create_link(link_net ? : dest_net, ifname,
-				       name_assign_type, ops, tb);
+				       name_assign_type, ops, tb, extack);
 		if (IS_ERR(dev)) {
 			err = PTR_ERR(dev);
 			goto out;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a8217e2..9a8a72c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1925,8 +1925,6 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
 		struct sk_buff *insp = NULL;
 
 		do {
-			BUG_ON(!list);
-
 			if (list->len <= eat) {
 				/* Eaten as whole. */
 				eat -= list->len;
@@ -2366,19 +2364,6 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL_GPL(skb_send_sock_locked);
 
-/* Send skb data on a socket. */
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
-{
-	int ret = 0;
-
-	lock_sock(sk);
-	ret = skb_send_sock_locked(sk, skb, offset, len);
-	release_sock(sk);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(skb_send_sock);
-
 /**
  *	skb_store_bits - store bits from kernel buffer to skb
  *	@skb: destination buffer
@@ -2645,6 +2630,49 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+{
+	__sum16 sum;
+
+	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev, skb);
+	}
+	if (!skb_shared(skb))
+		skb->csum_valid = !sum;
+	return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete_head);
+
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
+{
+	__wsum csum;
+	__sum16 sum;
+
+	csum = skb_checksum(skb, 0, skb->len, 0);
+
+	/* skb->csum holds pseudo checksum */
+	sum = csum_fold(csum_add(skb->csum, csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev, skb);
+	}
+
+	if (!skb_shared(skb)) {
+		/* Save full packet checksum */
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum_complete_sw = 1;
+		skb->csum_valid = !sum;
+	}
+
+	return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete);
+
 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
 {
 	net_warn_ratelimited(
@@ -5128,7 +5156,7 @@ int skb_vlan_pop(struct sk_buff *skb)
 	int err;
 
 	if (likely(skb_vlan_tag_present(skb))) {
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
 	} else {
 		if (unlikely(!eth_type_vlan(skb->protocol)))
 			return 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index 080a880..6d7e189 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -567,6 +567,8 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
 
 	lock_sock(sk);
 	sk->sk_bound_dev_if = index;
+	if (sk->sk_prot->rehash)
+		sk->sk_prot->rehash(sk);
 	sk_dst_reset(sk);
 	release_sock(sk);
 
@@ -950,10 +952,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			clear_bit(SOCK_PASSSEC, &sock->flags);
 		break;
 	case SO_MARK:
-		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
 			ret = -EPERM;
-		else
+		} else if (val != sk->sk_mark) {
 			sk->sk_mark = val;
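+			/* a new mark can change routing, so drop the cached dst */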
+			sk_dst_reset(sk);
+		}
 		break;
 
 	case SO_RXQ_OVFL:
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index ba5cba5..d8fe3e5 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -187,6 +187,7 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
 		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
 	return 0;
 }
+EXPORT_SYMBOL(reuseport_add_sock);
 
 void reuseport_detach_sock(struct sock *sk)
 {
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8e08cea6..26a21d9 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(dccp_req_err);
  * check at all. A more general error queue to queue errors for later handling
  * is probably better.
  */
-static void dccp_v4_err(struct sk_buff *skb, u32 info)
+static int dccp_v4_err(struct sk_buff *skb, u32 info)
 {
 	const struct iphdr *iph = (struct iphdr *)skb->data;
 	const u8 offset = iph->ihl << 2;
@@ -259,16 +259,18 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 				       inet_iif(skb), 0);
 	if (!sk) {
 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
+		return -ENOENT;
 	}
 
 	if (sk->sk_state == DCCP_TIME_WAIT) {
 		inet_twsk_put(inet_twsk(sk));
-		return;
+		return 0;
 	}
 	seq = dccp_hdr_seq(dh);
-	if (sk->sk_state == DCCP_NEW_SYN_RECV)
-		return dccp_req_err(sk, seq);
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		dccp_req_err(sk, seq);
+		return 0;
+	}
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
@@ -357,6 +359,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
+	return 0;
 }
 
 static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6344f1b..d5740ba 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -68,7 +68,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
 
 }
 
-static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
@@ -96,16 +96,18 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!sk) {
 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 				  ICMP6_MIB_INERRORS);
-		return;
+		return -ENOENT;
 	}
 
 	if (sk->sk_state == DCCP_TIME_WAIT) {
 		inet_twsk_put(inet_twsk(sk));
-		return;
+		return 0;
 	}
 	seq = dccp_hdr_seq(dh);
-	if (sk->sk_state == DCCP_NEW_SYN_RECV)
-		return dccp_req_err(sk, seq);
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		dccp_req_err(sk, seq);
+		return 0;
+	}
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
@@ -183,6 +185,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
+	return 0;
 }
 
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 43733ac..658cd32b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -948,6 +948,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
 	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
 		goto out;
 
+	sk->sk_max_ack_backlog = backlog;
 	/* Really, if the socket is already in listen state
 	 * we can only allow the backlog to be adjusted.
 	 */
@@ -960,7 +961,6 @@ int inet_dccp_listen(struct socket *sock, int backlog)
 		if (err)
 			goto out;
 	}
-	sk->sk_max_ack_backlog = backlog;
 	err = 0;
 
 out:
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 7d6ff98..7aab5d0 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -192,7 +192,7 @@ static int check_port(__le16 port)
 static unsigned short port_alloc(struct sock *sk)
 {
 	struct dn_scp *scp = DN_SK(sk);
-static unsigned short port = 0x2000;
+	static unsigned short port = 0x2000;
 	unsigned short i_port = port;
 
 	while(check_port(cpu_to_le16(++port)) != 0) {
diff --git a/net/dsa/port.c b/net/dsa/port.c
index ed05954..2d7e01b 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -252,9 +252,6 @@ int dsa_port_vlan_add(struct dsa_port *dp,
 		.vlan = vlan,
 	};
 
-	if (netif_is_bridge_master(vlan->obj.orig_dev))
-		return -EOPNOTSUPP;
-
 	if (br_vlan_enabled(dp->bridge_dev))
 		return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7d0c19e..268119c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1050,8 +1050,6 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
 	.switchdev_port_attr_get	= dsa_slave_port_attr_get,
 	.switchdev_port_attr_set	= dsa_slave_port_attr_set,
-	.switchdev_port_obj_add		= dsa_slave_port_obj_add,
-	.switchdev_port_obj_del		= dsa_slave_port_obj_del,
 };
 
 static struct device_type dsa_type = {
@@ -1557,6 +1555,44 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
 	return NOTIFY_BAD;
 }
 
+static int
+dsa_slave_switchdev_port_obj_event(unsigned long event,
+			struct net_device *netdev,
+			struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+	int err = -EOPNOTSUPP;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD:
+		err = dsa_slave_port_obj_add(netdev, port_obj_info->obj,
+					     port_obj_info->trans);
+		break;
+	case SWITCHDEV_PORT_OBJ_DEL:
+		err = dsa_slave_port_obj_del(netdev, port_obj_info->obj);
+		break;
+	}
+
+	port_obj_info->handled = true;
+	return notifier_from_errno(err);
+}
+
+static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
+					      unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+	if (!dsa_slave_dev_check(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
+	case SWITCHDEV_PORT_OBJ_DEL:
+		return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
+	}
+
+	return NOTIFY_DONE;
+}
+
 static struct notifier_block dsa_slave_nb __read_mostly = {
 	.notifier_call  = dsa_slave_netdevice_event,
 };
@@ -1565,8 +1601,13 @@ static struct notifier_block dsa_slave_switchdev_notifier = {
 	.notifier_call = dsa_slave_switchdev_event,
 };
 
+static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
+	.notifier_call = dsa_slave_switchdev_blocking_event,
+};
+
 int dsa_slave_register_notifier(void)
 {
+	struct notifier_block *nb;
 	int err;
 
 	err = register_netdevice_notifier(&dsa_slave_nb);
@@ -1577,8 +1618,15 @@ int dsa_slave_register_notifier(void)
 	if (err)
 		goto err_switchdev_nb;
 
+	nb = &dsa_slave_switchdev_blocking_notifier;
+	err = register_switchdev_blocking_notifier(nb);
+	if (err)
+		goto err_switchdev_blocking_nb;
+
 	return 0;
 
+err_switchdev_blocking_nb:
+	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
 err_switchdev_nb:
 	unregister_netdevice_notifier(&dsa_slave_nb);
 	return err;
@@ -1586,8 +1634,14 @@ int dsa_slave_register_notifier(void)
 
 void dsa_slave_unregister_notifier(void)
 {
+	struct notifier_block *nb;
 	int err;
 
+	nb = &dsa_slave_switchdev_blocking_notifier;
+	err = unregister_switchdev_blocking_notifier(nb);
+	if (err)
+		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
+
 	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
 	if (err)
 		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index fd8faa0..58933fa 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -165,15 +165,17 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
 	eth = (struct ethhdr *)skb->data;
 	skb_pull_inline(skb, ETH_HLEN);
 
-	if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
-		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
-			skb->pkt_type = PACKET_BROADCAST;
-		else
-			skb->pkt_type = PACKET_MULTICAST;
+	if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+					      dev->dev_addr))) {
+		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+				skb->pkt_type = PACKET_BROADCAST;
+			else
+				skb->pkt_type = PACKET_MULTICAST;
+		} else {
+			skb->pkt_type = PACKET_OTHERHOST;
+		}
 	}
-	else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-						   dev->dev_addr)))
-		skb->pkt_type = PACKET_OTHERHOST;
 
 	/*
 	 * Some variants of DSA tagging don't have an ethertype field
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1fbe2f8..326c422 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -208,6 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
 	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 		goto out;
 
+	sk->sk_max_ack_backlog = backlog;
 	/* Really, if the socket is already in listen state
 	 * we can only allow the backlog to be adjusted.
 	 */
@@ -231,7 +232,6 @@ int inet_listen(struct socket *sock, int backlog)
 			goto out;
 		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
 	}
-	sk->sk_max_ack_backlog = backlog;
 	err = 0;
 
 out:
@@ -1964,6 +1964,8 @@ static int __init inet_init(void)
 	/* Add UDP-Lite (RFC 3828) */
 	udplite4_register();
 
+	raw_init();
+
 	ping_init();
 
 	/*
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b5c3937..5022bc6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1076,7 +1076,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
 	if (!fi)
 		goto failure;
 	fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
-					      cfg->fc_mx_len);
+					      cfg->fc_mx_len, extack);
 	if (unlikely(IS_ERR(fi->fib_metrics))) {
 		err = PTR_ERR(fi->fib_metrics);
 		kfree(fi);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 500a599..0d0ad19 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -3,6 +3,7 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
+#include <linux/icmp.h>
 #include <linux/udp.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -1003,15 +1004,82 @@ static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 	return 0;
 }
 
+static int gue_err_proto_handler(int proto, struct sk_buff *skb, u32 info)
+{
+	const struct net_protocol *ipprot = rcu_dereference(inet_protos[proto]);
+
+	if (ipprot && ipprot->err_handler) {
+		if (!ipprot->err_handler(skb, info))
+			return 0;
+	}
+
+	return -ENOENT;
+}
+
+static int gue_err(struct sk_buff *skb, u32 info)
+{
+	int transport_offset = skb_transport_offset(skb);
+	struct guehdr *guehdr;
+	size_t optlen;
+	int ret;
+
+	if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+		return -EINVAL;
+
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+	switch (guehdr->version) {
+	case 0: /* Full GUE header present */
+		break;
+	case 1: {
+		/* Direct encapsulation of IPv4 or IPv6 */
+		skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
+
+		switch (((struct iphdr *)guehdr)->version) {
+		case 4:
+			ret = gue_err_proto_handler(IPPROTO_IPIP, skb, info);
+			goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+		case 6:
+			ret = gue_err_proto_handler(IPPROTO_IPV6, skb, info);
+			goto out;
+#endif
+		default:
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+	}
+	default: /* Undefined version */
+		return -EOPNOTSUPP;
+	}
+
+	if (guehdr->control)
+		return -ENOENT;
+
+	optlen = guehdr->hlen << 2;
+
+	if (validate_gue_flags(guehdr, optlen))
+		return -EINVAL;
+
+	skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
+	ret = gue_err_proto_handler(guehdr->proto_ctype, skb, info);
+
+out:
+	skb_set_transport_header(skb, transport_offset);
+	return ret;
+}
+
 
 static const struct ip_tunnel_encap_ops fou_iptun_ops = {
 	.encap_hlen = fou_encap_hlen,
 	.build_header = fou_build_header,
+	.err_handler = gue_err,
 };
 
 static const struct ip_tunnel_encap_ops gue_iptun_ops = {
 	.encap_hlen = gue_encap_hlen,
 	.build_header = gue_build_header,
+	.err_handler = gue_err,
 };
 
 static int ip_tunnel_encap_add_fou_ops(void)
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 7efe740..a4bf22e 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -151,20 +151,25 @@ static int gre_rcv(struct sk_buff *skb)
 	return NET_RX_DROP;
 }
 
-static void gre_err(struct sk_buff *skb, u32 info)
+static int gre_err(struct sk_buff *skb, u32 info)
 {
 	const struct gre_protocol *proto;
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
+	int err = 0;
 
 	if (ver >= GREPROTO_MAX)
-		return;
+		return -EINVAL;
 
 	rcu_read_lock();
 	proto = rcu_dereference(gre_proto[ver]);
 	if (proto && proto->err_handler)
 		proto->err_handler(skb, info);
+	else
+		err = -EPROTONOSUPPORT;
 	rcu_read_unlock();
+
+	return err;
 }
 
 static const struct net_protocol net_gre_protocol = {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d832bee..065997f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1079,7 +1079,7 @@ int icmp_rcv(struct sk_buff *skb)
 	goto drop;
 }
 
-void icmp_err(struct sk_buff *skb, u32 info)
+int icmp_err(struct sk_buff *skb, u32 info)
 {
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	int offset = iph->ihl<<2;
@@ -1094,13 +1094,15 @@ void icmp_err(struct sk_buff *skb, u32 info)
 	 */
 	if (icmph->type != ICMP_ECHOREPLY) {
 		ping_err(skb, offset, info);
-		return;
+		return 0;
 	}
 
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
 		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
 	else if (type == ICMP_REDIRECT)
 		ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
+
+	return 0;
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 15e7f79..6ea523d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -183,7 +183,9 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
 	int i, low, high, attempt_half;
 	struct inet_bind_bucket *tb;
 	u32 remaining, offset;
+	int l3mdev;
 
+	l3mdev = inet_sk_bound_l3mdev(sk);
 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 other_half_scan:
 	inet_get_local_port_range(net, &low, &high);
@@ -219,7 +221,8 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
 						  hinfo->bhash_size)];
 		spin_lock_bh(&head->lock);
 		inet_bind_bucket_for_each(tb, &head->chain)
-			if (net_eq(ib_net(tb), net) && tb->port == port) {
+			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+			    tb->port == port) {
 				if (!inet_csk_bind_conflict(sk, tb, false, false))
 					goto success;
 				goto next_port;
@@ -293,6 +296,9 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 	struct net *net = sock_net(sk);
 	struct inet_bind_bucket *tb = NULL;
 	kuid_t uid = sock_i_uid(sk);
+	int l3mdev;
+
+	l3mdev = inet_sk_bound_l3mdev(sk);
 
 	if (!port) {
 		head = inet_csk_find_open_port(sk, &tb, &port);
@@ -306,11 +312,12 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 					  hinfo->bhash_size)];
 	spin_lock_bh(&head->lock);
 	inet_bind_bucket_for_each(tb, &head->chain)
-		if (net_eq(ib_net(tb), net) && tb->port == port)
+		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+		    tb->port == port)
 			goto tb_found;
 tb_not_found:
 	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-				     net, head, port);
+				     net, head, port, l3mdev);
 	if (!tb)
 		goto fail_unlock;
 tb_found:
@@ -874,7 +881,6 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 
 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 
-	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
 	inet_csk_delack_init(sk);
 
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 411dd7a..13890d5 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -65,12 +65,14 @@ static u32 sk_ehashfn(const struct sock *sk)
 struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 						 struct net *net,
 						 struct inet_bind_hashbucket *head,
-						 const unsigned short snum)
+						 const unsigned short snum,
+						 int l3mdev)
 {
 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb) {
 		write_pnet(&tb->ib_net, net);
+		tb->l3mdev    = l3mdev;
 		tb->port      = snum;
 		tb->fastreuse = 0;
 		tb->fastreuseport = 0;
@@ -135,6 +137,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 			table->bhash_size);
 	struct inet_bind_hashbucket *head = &table->bhash[bhash];
 	struct inet_bind_bucket *tb;
+	int l3mdev;
 
 	spin_lock(&head->lock);
 	tb = inet_csk(sk)->icsk_bind_hash;
@@ -143,6 +146,8 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 		return -ENOENT;
 	}
 	if (tb->port != port) {
+		l3mdev = inet_sk_bound_l3mdev(sk);
+
 		/* NOTE: using tproxy and redirecting skbs to a proxy
 		 * on a different listener port breaks the assumption
 		 * that the listener socket's icsk_bind_hash is the same
@@ -150,12 +155,13 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 		 * create a new bind bucket for the child here. */
 		inet_bind_bucket_for_each(tb, &head->chain) {
 			if (net_eq(ib_net(tb), sock_net(sk)) &&
-			    tb->port == port)
+			    tb->l3mdev == l3mdev && tb->port == port)
 				break;
 		}
 		if (!tb) {
 			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
-						     sock_net(sk), head, port);
+						     sock_net(sk), head, port,
+						     l3mdev);
 			if (!tb) {
 				spin_unlock(&head->lock);
 				return -ENOMEM;
@@ -229,6 +235,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 {
 	int score = -1;
 	struct inet_sock *inet = inet_sk(sk);
+	bool dev_match;
 
 	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
 			!ipv6_only_sock(sk)) {
@@ -239,15 +246,12 @@ static inline int compute_score(struct sock *sk, struct net *net,
 				return -1;
 			score += 4;
 		}
-		if (sk->sk_bound_dev_if || exact_dif) {
-			bool dev_match = (sk->sk_bound_dev_if == dif ||
-					  sk->sk_bound_dev_if == sdif);
+		dev_match = inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+						 dif, sdif);
+		if (!dev_match)
+			return -1;
+		score += 4;
 
-			if (!dev_match)
-				return -1;
-			if (sk->sk_bound_dev_if)
-				score += 4;
-		}
 		if (sk->sk_incoming_cpu == raw_smp_processor_id())
 			score++;
 	}
@@ -675,6 +679,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	u32 remaining, offset;
 	int ret, i, low, high;
 	static u32 hint;
+	int l3mdev;
 
 	if (port) {
 		head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -693,6 +698,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		return ret;
 	}
 
+	l3mdev = inet_sk_bound_l3mdev(sk);
+
 	inet_get_local_port_range(net, &low, &high);
 	high++; /* [32768, 60999] -> [32768, 61000[ */
 	remaining = high - low;
@@ -719,7 +726,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		 * the established check is already unique enough.
 		 */
 		inet_bind_bucket_for_each(tb, &head->chain) {
-			if (net_eq(ib_net(tb), net) && tb->port == port) {
+			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+			    tb->port == port) {
 				if (tb->fastreuse >= 0 ||
 				    tb->fastreuseport >= 0)
 					goto next_port;
@@ -732,7 +740,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		}
 
 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-					     net, head, port);
+					     net, head, port, l3mdev);
 		if (!tb) {
 			spin_unlock_bh(&head->lock);
 			return -ENOMEM;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38befe8..76a9a5f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -121,8 +121,8 @@ static unsigned int ipgre_net_id __read_mostly;
 static unsigned int gre_tap_net_id __read_mostly;
 static unsigned int erspan_net_id __read_mostly;
 
-static void ipgre_err(struct sk_buff *skb, u32 info,
-		      const struct tnl_ptk_info *tpi)
+static int ipgre_err(struct sk_buff *skb, u32 info,
+		     const struct tnl_ptk_info *tpi)
 {
 
 	/* All the routers (except for Linux) return only
@@ -146,36 +146,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 	unsigned int data_len = 0;
 	struct ip_tunnel *t;
 
-	switch (type) {
-	default:
-	case ICMP_PARAMETERPROB:
-		return;
-
-	case ICMP_DEST_UNREACH:
-		switch (code) {
-		case ICMP_SR_FAILED:
-		case ICMP_PORT_UNREACH:
-			/* Impossible event. */
-			return;
-		default:
-			/* All others are translated to HOST_UNREACH.
-			   rfc2003 contains "deep thoughts" about NET_UNREACH,
-			   I believe they are just ether pollution. --ANK
-			 */
-			break;
-		}
-		break;
-
-	case ICMP_TIME_EXCEEDED:
-		if (code != ICMP_EXC_TTL)
-			return;
-		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
-		break;
-
-	case ICMP_REDIRECT:
-		break;
-	}
-
 	if (tpi->proto == htons(ETH_P_TEB))
 		itn = net_generic(net, gre_tap_net_id);
 	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
@@ -189,27 +159,59 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 			     iph->daddr, iph->saddr, tpi->key);
 
 	if (!t)
-		return;
+		return -ENOENT;
+
+	switch (type) {
+	default:
+	case ICMP_PARAMETERPROB:
+		return 0;
+
+	case ICMP_DEST_UNREACH:
+		switch (code) {
+		case ICMP_SR_FAILED:
+		case ICMP_PORT_UNREACH:
+			/* Impossible event. */
+			return 0;
+		default:
+			/* All others are translated to HOST_UNREACH.
+			   rfc2003 contains "deep thoughts" about NET_UNREACH,
+			   I believe they are just ether pollution. --ANK
+			 */
+			break;
+		}
+		break;
+
+	case ICMP_TIME_EXCEEDED:
+		if (code != ICMP_EXC_TTL)
+			return 0;
+		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
+		break;
+
+	case ICMP_REDIRECT:
+		break;
+	}
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (tpi->proto == htons(ETH_P_IPV6) &&
            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 				       type, data_len))
-               return;
+               return 0;
 #endif
 
 	if (t->parms.iph.daddr == 0 ||
 	    ipv4_is_multicast(t->parms.iph.daddr))
-		return;
+		return 0;
 
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-		return;
+		return 0;
 
 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 		t->err_count++;
 	else
 		t->err_count = 1;
 	t->err_time = jiffies;
+
+	return 0;
 }
 
 static void gre_err(struct sk_buff *skb, u32 info)
@@ -1601,7 +1603,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 	memset(&tb, 0, sizeof(tb));
 
 	dev = rtnl_create_link(net, name, name_assign_type,
-			       &ipgre_tap_ops, tb);
+			       &ipgre_tap_ops, tb, NULL);
 	if (IS_ERR(dev))
 		return dev;
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 35a786c..72250b4 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -188,51 +188,50 @@ bool ip_call_ra_chain(struct sk_buff *skb)
 	return false;
 }
 
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
+{
+	const struct net_protocol *ipprot;
+	int raw, ret;
+
+resubmit:
+	raw = raw_local_deliver(skb, protocol);
+
+	ipprot = rcu_dereference(inet_protos[protocol]);
+	if (ipprot) {
+		if (!ipprot->no_policy) {
+			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+				kfree_skb(skb);
+				return;
+			}
+			nf_reset(skb);
+		}
+		ret = ipprot->handler(skb);
+		if (ret < 0) {
+			protocol = -ret;
+			goto resubmit;
+		}
+		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
+	} else {
+		if (!raw) {
+			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+				icmp_send(skb, ICMP_DEST_UNREACH,
+					  ICMP_PROT_UNREACH, 0);
+			}
+			kfree_skb(skb);
+		} else {
+			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
+			consume_skb(skb);
+		}
+	}
+}
+
 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	__skb_pull(skb, skb_network_header_len(skb));
 
 	rcu_read_lock();
-	{
-		int protocol = ip_hdr(skb)->protocol;
-		const struct net_protocol *ipprot;
-		int raw;
-
-	resubmit:
-		raw = raw_local_deliver(skb, protocol);
-
-		ipprot = rcu_dereference(inet_protos[protocol]);
-		if (ipprot) {
-			int ret;
-
-			if (!ipprot->no_policy) {
-				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-					kfree_skb(skb);
-					goto out;
-				}
-				nf_reset(skb);
-			}
-			ret = ipprot->handler(skb);
-			if (ret < 0) {
-				protocol = -ret;
-				goto resubmit;
-			}
-			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
-		} else {
-			if (!raw) {
-				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
-					icmp_send(skb, ICMP_DEST_UNREACH,
-						  ICMP_PROT_UNREACH, 0);
-				}
-				kfree_skb(skb);
-			} else {
-				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
-				consume_skb(skb);
-			}
-		}
-	}
- out:
+	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
 	rcu_read_unlock();
 
 	return 0;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c248e0d..c857ec6 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -120,7 +120,7 @@ int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
 	}
 
 	skb_clear_hash_if_not_l4(skb);
-	skb->vlan_tci = 0;
+	__vlan_hwaccel_clear_tag(skb);
 	skb_set_queue_mapping(skb, 0);
 	skb_scrub_packet(skb, xnet);
 
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e65287c2..57c5dd2 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -140,6 +140,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
 	struct ip_tunnel *t;
 	int err = 0;
 
+	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+			     iph->daddr, iph->saddr, 0);
+	if (!t) {
+		err = -ENOENT;
+		goto out;
+	}
+
 	switch (type) {
 	case ICMP_DEST_UNREACH:
 		switch (code) {
@@ -167,13 +174,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
 		goto out;
 	}
 
-	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-			     iph->daddr, iph->saddr, 0);
-	if (!t) {
-		err = -ENOENT;
-		goto out;
-	}
-
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
 		goto out;
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 6d218f5..ca9a5fe 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -6,7 +6,8 @@
 #include <net/tcp.h>
 
 static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
-			      int fc_mx_len, u32 *metrics)
+			      int fc_mx_len, u32 *metrics,
+			      struct netlink_ext_ack *extack)
 {
 	bool ecn_ca = false;
 	struct nlattr *nla;
@@ -21,19 +22,26 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
 
 		if (!type)
 			continue;
-		if (type > RTAX_MAX)
+		if (type > RTAX_MAX) {
+			NL_SET_ERR_MSG(extack, "Invalid metric type");
 			return -EINVAL;
+		}
 
 		if (type == RTAX_CC_ALGO) {
 			char tmp[TCP_CA_NAME_MAX];
 
 			nla_strlcpy(tmp, nla, sizeof(tmp));
 			val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
-			if (val == TCP_CA_UNSPEC)
+			if (val == TCP_CA_UNSPEC) {
+				NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
 				return -EINVAL;
+			}
 		} else {
-			if (nla_len(nla) != sizeof(u32))
+			if (nla_len(nla) != sizeof(u32)) {
+				NL_SET_ERR_MSG_ATTR(extack, nla,
+						    "Invalid attribute in metrics");
 				return -EINVAL;
+			}
 			val = nla_get_u32(nla);
 		}
 		if (type == RTAX_ADVMSS && val > 65535 - 40)
@@ -42,8 +50,10 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
 			val = 65535 - 15;
 		if (type == RTAX_HOPLIMIT && val > 255)
 			val = 255;
-		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) {
+			NL_SET_ERR_MSG(extack, "Unknown flag set in feature mask in metrics attribute");
 			return -EINVAL;
+		}
 		metrics[type - 1] = val;
 	}
 
@@ -54,7 +64,8 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
 }
 
 struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
-					int fc_mx_len)
+					int fc_mx_len,
+					struct netlink_ext_ack *extack)
 {
 	struct dst_metrics *fib_metrics;
 	int err;
@@ -66,7 +77,8 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
 	if (unlikely(!fib_metrics))
 		return ERR_PTR(-ENOMEM);
 
-	err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics);
+	err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
+				 extack);
 	if (!err) {
 		refcount_set(&fib_metrics->refcnt, 1);
 	} else {
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 32a691b..92d249e 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -29,6 +29,7 @@
 #include <net/protocol.h>
 
 struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
+EXPORT_SYMBOL(inet_protos);
 const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 EXPORT_SYMBOL(inet_offloads);
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8ca3eb06..fb1f020 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -131,8 +131,7 @@ struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
 		if (net_eq(sock_net(sk), net) && inet->inet_num == num	&&
 		    !(inet->inet_daddr && inet->inet_daddr != raddr) 	&&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
-		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
-		      sk->sk_bound_dev_if != sdif))
+		    raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
 			goto found; /* gotcha */
 	}
 	sk = NULL;
@@ -805,7 +804,7 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	return copied;
 }
 
-static int raw_init(struct sock *sk)
+static int raw_sk_init(struct sock *sk)
 {
 	struct raw_sock *rp = raw_sk(sk);
 
@@ -970,7 +969,7 @@ struct proto raw_prot = {
 	.connect	   = ip4_datagram_connect,
 	.disconnect	   = __udp_disconnect,
 	.ioctl		   = raw_ioctl,
-	.init		   = raw_init,
+	.init		   = raw_sk_init,
 	.setsockopt	   = raw_setsockopt,
 	.getsockopt	   = raw_getsockopt,
 	.sendmsg	   = raw_sendmsg,
@@ -1133,4 +1132,28 @@ void __init raw_proc_exit(void)
 {
 	unregister_pernet_subsys(&raw_net_ops);
 }
+
+static void raw_sysctl_init_net(struct net *net)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	net->ipv4.sysctl_raw_l3mdev_accept = 1;
+#endif
+}
+
+static int __net_init raw_sysctl_init(struct net *net)
+{
+	raw_sysctl_init_net(net);
+	return 0;
+}
+
+static struct pernet_operations __net_initdata raw_sysctl_ops = {
+	.init	= raw_sysctl_init,
+};
+
+void __init raw_init(void)
+{
+	raw_sysctl_init_net(&init_net);
+	if (register_pernet_subsys(&raw_sysctl_ops))
+		panic("RAW: failed to init sysctl parameters.\n");
+}
 #endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c0a9d26..c4ddbc5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1677,7 +1677,7 @@ static void ip_handle_martian_source(struct net_device *dev,
 			print_hex_dump(KERN_WARNING, "ll header: ",
 				       DUMP_PREFIX_OFFSET, 16, 1,
 				       skb_mac_header(skb),
-				       dev->hard_header_len, true);
+				       dev->hard_header_len, false);
 		}
 	}
 #endif
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 891ed2f..ba0fc4b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -602,6 +602,17 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= ipv4_ping_group_range,
 	},
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	{
+		.procname	= "raw_l3mdev_accept",
+		.data		= &init_net.ipv4.sysctl_raw_l3mdev_accept,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+#endif
 	{
 		.procname	= "tcp_ecn",
 		.data		= &init_net.ipv4.sysctl_tcp_ecn,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9e6bc4d..2520487 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2241,10 +2241,6 @@ void tcp_set_state(struct sock *sk, int state)
 	 * socket sitting in hash tables.
 	 */
 	inet_sk_state_store(sk, state);
-
-#ifdef STATE_TRACE
-	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
-#endif
 }
 EXPORT_SYMBOL_GPL(tcp_set_state);
 
@@ -3246,6 +3242,7 @@ static size_t tcp_opt_stats_get_size(void)
 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
 		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
 		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
+		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
 		0;
 }
 
@@ -3299,6 +3296,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
 			  TCP_NLA_PAD);
 	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
 	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
+	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
 
 	return stats;
 }
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 9277abd..0f497fc 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -128,7 +128,12 @@ static const u32 bbr_probe_rtt_mode_ms = 200;
 /* Skip TSO below the following bandwidth (bits/sec): */
 static const int bbr_min_tso_rate = 1200000;
 
-/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck. */
+/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
+ * In order to help drive the network toward lower queues and low latency while
+ * maintaining high utilization, the average pacing rate aims to be slightly
+ * lower than the estimated bandwidth. This is an important aspect of the
+ * design.
+ */
 static const int bbr_pacing_margin_percent = 1;
 
 /* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
@@ -247,13 +252,7 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
 }
 
-/* Pace using current bw estimate and a gain factor. In order to help drive the
- * network toward lower queues while maintaining high utilization and low
- * latency, the average pacing rate aims to be slightly (~1%) lower than the
- * estimated bandwidth. This is an important aspect of the design. In this
- * implementation this slightly lower pacing rate is achieved implicitly by not
- * including link-layer headers in the packet size used for the pacing rate.
- */
+/* Pace using current bw estimate and a gain factor. */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1e37c13..568dbf3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2457,8 +2457,8 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
 		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 			       tp->prior_cwnd - 1;
 		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
-		   !(flag & FLAG_LOST_RETRANS)) {
+	} else if ((flag & (FLAG_RETRANS_DATA_ACKED | FLAG_LOST_RETRANS)) ==
+		   FLAG_RETRANS_DATA_ACKED) {
 		sndcnt = min_t(int, delta,
 			       max_t(int, tp->prr_delivered - tp->prr_out,
 				     newly_acked_sacked) + 1);
@@ -3610,7 +3610,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (flag & FLAG_UPDATE_TS_RECENT)
 		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 
-	if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+	if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
+	    FLAG_SND_UNA_ADVANCED) {
 		/* Window is constant, pure forward advance.
 		 * No more checks are required.
 		 * Note, we use the fact that SND.UNA>=SND.WL2.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index de47038a..795605a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -423,7 +423,7 @@ EXPORT_SYMBOL(tcp_req_err);
  *
  */
 
-void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 {
 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
@@ -446,20 +446,21 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 				       inet_iif(icmp_skb), 0);
 	if (!sk) {
 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
+		return -ENOENT;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
 		inet_twsk_put(inet_twsk(sk));
-		return;
+		return 0;
 	}
 	seq = ntohl(th->seq);
-	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq,
-				  type == ICMP_PARAMETERPROB ||
-				  type == ICMP_TIME_EXCEEDED ||
-				  (type == ICMP_DEST_UNREACH &&
-				   (code == ICMP_NET_UNREACH ||
-				    code == ICMP_HOST_UNREACH)));
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
+				     type == ICMP_TIME_EXCEEDED ||
+				     (type == ICMP_DEST_UNREACH &&
+				      (code == ICMP_NET_UNREACH ||
+				       code == ICMP_HOST_UNREACH)));
+		return 0;
+	}
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
@@ -541,7 +542,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
 		skb = tcp_rtx_queue_head(sk);
-		BUG_ON(!skb);
 
 		tcp_mstamp_refresh(tp);
 		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
@@ -613,6 +613,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
+	return 0;
 }
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
@@ -1633,6 +1634,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	 */
 	skb_condense(skb);
 
+	skb_dst_drop(skb);
+
 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
 		bh_unlock_sock(sk);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
@@ -2573,8 +2576,8 @@ static int __net_init tcp_sk_init(struct net *net)
 	 * which are too large can cause TCP streams to be bursty.
 	 */
 	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
-	/* Default TSQ limit of four TSO segments */
-	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
+	/* Default TSQ limit of 16 TSO segments */
+	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
 	/* rfc5961 challenge ack rate limiting */
 	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
 	net->ipv4.sysctl_tcp_min_tso_segs = 2;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f510ca..c5dc4c4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 				 bool *is_cwnd_limited, u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 age, send_win, cong_win, limit, in_flight;
+	u32 send_win, cong_win, limit, in_flight;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *head;
 	int win_divisor;
+	s64 delta;
 
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
@@ -1919,9 +1920,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 		goto send_now;
 
 	/* Avoid bursty behavior by allowing defer
-	 * only if the last write was recent.
+	 * only if the last write was recent (1 ms).
+	 * Note that tp->tcp_wstamp_ns can be in the future if we have
+	 * packets waiting in a qdisc or device for EDT delivery.
 	 */
-	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
+	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
+	if (delta > 0)
 		goto send_now;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1944,6 +1948,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
 		goto send_now;
 
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->eor)
+		goto send_now;
+
 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
 	if (win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1968,9 +1976,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	head = tcp_rtx_queue_head(sk);
 	if (!head)
 		goto send_now;
-	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
+	delta = tp->tcp_clock_cache - head->tstamp;
 	/* If next ACK is likely to come too late (half srtt), do not defer */
-	if (age < (tp->srtt_us >> 4))
+	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
 		goto send_now;
 
 	/* Ok, it looks like it is advisable to defer. */
@@ -2212,8 +2220,9 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit = max_t(unsigned long,
 		      2 * skb->truesize,
 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
-	limit = min_t(unsigned long, limit,
-		      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+	if (sk->sk_pacing_status == SK_PACING_NONE)
+		limit = min_t(unsigned long, limit,
+			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
 	limit <<= factor;
 
 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index c063001..33bf8e9 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -149,34 +149,40 @@ static int tunnelmpls4_rcv(struct sk_buff *skb)
 }
 #endif
 
-static void tunnel4_err(struct sk_buff *skb, u32 info)
+static int tunnel4_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm_tunnel *handler;
 
 	for_each_tunnel_rcu(tunnel4_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static void tunnel64_err(struct sk_buff *skb, u32 info)
+static int tunnel64_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm_tunnel *handler;
 
 	for_each_tunnel_rcu(tunnel64_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 #endif
 
 #if IS_ENABLED(CONFIG_MPLS)
-static void tunnelmpls4_err(struct sk_buff *skb, u32 info)
+static int tunnelmpls4_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm_tunnel *handler;
 
 	for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 #endif
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1976fdd..aff2a8e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -105,6 +105,7 @@
 #include <net/net_namespace.h>
 #include <net/icmp.h>
 #include <net/inet_hashtables.h>
+#include <net/ip_tunnels.h>
 #include <net/route.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -115,6 +116,7 @@
 #include "udp_impl.h"
 #include <net/sock_reuseport.h>
 #include <net/addrconf.h>
+#include <net/udp_tunnel.h>
 
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
@@ -371,6 +373,7 @@ static int compute_score(struct sock *sk, struct net *net,
 {
 	int score;
 	struct inet_sock *inet;
+	bool dev_match;
 
 	if (!net_eq(sock_net(sk), net) ||
 	    udp_sk(sk)->udp_port_hash != hnum ||
@@ -398,15 +401,11 @@ static int compute_score(struct sock *sk, struct net *net,
 		score += 4;
 	}
 
-	if (sk->sk_bound_dev_if || exact_dif) {
-		bool dev_match = (sk->sk_bound_dev_if == dif ||
-				  sk->sk_bound_dev_if == sdif);
-
-		if (!dev_match)
-			return -1;
-		if (sk->sk_bound_dev_if)
-			score += 4;
-	}
+	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+					dif, sdif);
+	if (!dev_match)
+		return -1;
+	score += 4;
 
 	if (sk->sk_incoming_cpu == raw_smp_processor_id())
 		score++;
@@ -585,6 +584,89 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
 	return true;
 }
 
+DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+void udp_encap_enable(void)
+{
+	static_branch_inc(&udp_encap_needed_key);
+}
+EXPORT_SYMBOL(udp_encap_enable);
+
+/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
+ * through error handlers in encapsulations looking for a match.
+ */
+static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
+{
+	int i;
+
+	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
+		int (*handler)(struct sk_buff *skb, u32 info);
+
+		if (!iptun_encaps[i])
+			continue;
+		handler = rcu_dereference(iptun_encaps[i]->err_handler);
+		if (handler && !handler(skb, info))
+			return 0;
+	}
+
+	return -ENOENT;
+}
+
+/* Try to match ICMP errors to UDP tunnels by looking up a socket without
+ * reversing source and destination port: this will match tunnels that force the
+ * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
+ * lwtunnels might actually break this assumption by being configured with
+ * different destination ports on endpoints; in this case we won't be able to
+ * trace ICMP messages back to them.
+ *
+ * If this doesn't match any socket, probe tunnels with arbitrary destination
+ * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
+ * we've sent packets to won't necessarily match the local destination port.
+ *
+ * Then ask the tunnel implementation to match the error against a valid
+ * association.
+ *
+ * Return an error if we can't find a match, the socket if we need further
+ * processing, zero otherwise.
+ */
+static struct sock *__udp4_lib_err_encap(struct net *net,
+					 const struct iphdr *iph,
+					 struct udphdr *uh,
+					 struct udp_table *udptable,
+					 struct sk_buff *skb, u32 info)
+{
+	int network_offset, transport_offset;
+	struct sock *sk;
+
+	network_offset = skb_network_offset(skb);
+	transport_offset = skb_transport_offset(skb);
+
+	/* Network header needs to point to the outer IPv4 header inside ICMP */
+	skb_reset_network_header(skb);
+
+	/* Transport header needs to point to the UDP header */
+	skb_set_transport_header(skb, iph->ihl << 2);
+
+	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
+			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
+			       udptable, NULL);
+	if (sk) {
+		int (*lookup)(struct sock *sk, struct sk_buff *skb);
+		struct udp_sock *up = udp_sk(sk);
+
+		lookup = READ_ONCE(up->encap_err_lookup);
+		if (!lookup || lookup(sk, skb))
+			sk = NULL;
+	}
+
+	if (!sk)
+		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
+
+	skb_set_transport_header(skb, transport_offset);
+	skb_set_network_header(skb, network_offset);
+
+	return sk;
+}
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -596,13 +678,14 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
  * to find the appropriate port.
  */
 
-void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 {
 	struct inet_sock *inet;
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
+	bool tunnel = false;
 	struct sock *sk;
 	int harderr;
 	int err;
@@ -612,8 +695,21 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 			       iph->saddr, uh->source, skb->dev->ifindex,
 			       inet_sdif(skb), udptable, NULL);
 	if (!sk) {
-		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;	/* No socket for error */
+		/* No socket for error: try tunnels before discarding */
+		sk = ERR_PTR(-ENOENT);
+		if (static_branch_unlikely(&udp_encap_needed_key)) {
+			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
+						  info);
+			if (!sk)
+				return 0;
+		}
+
+		if (IS_ERR(sk)) {
+			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+			return PTR_ERR(sk);
+		}
+
+		tunnel = true;
 	}
 
 	err = 0;
@@ -656,6 +752,10 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
 	 *	4.1.3.3.
 	 */
+	if (tunnel) {
+		/* ...not for tunnels though: we don't have a sending socket */
+		goto out;
+	}
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
@@ -665,12 +765,12 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
-	return;
+	return 0;
 }
 
-void udp_err(struct sk_buff *skb, u32 info)
+int udp_err(struct sk_buff *skb, u32 info)
 {
-	__udp4_lib_err(skb, info, &udp_table);
+	return __udp4_lib_err(skb, info, &udp_table);
 }
 
 /*
@@ -1713,6 +1813,10 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
 		*addr_len = sizeof(*sin);
 	}
+
+	if (udp_sk(sk)->gro_enabled)
+		udp_cmsg_recv(msg, sk, skb);
+
 	if (inet->cmsg_flags)
 		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
 
@@ -1889,13 +1993,6 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
-DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
-void udp_encap_enable(void)
-{
-	static_branch_enable(&udp_encap_needed_key);
-}
-EXPORT_SYMBOL(udp_encap_enable);
-
 /* returns:
  *  -1: error
  *   0: success
@@ -1904,7 +2001,7 @@ EXPORT_SYMBOL(udp_encap_enable);
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
@@ -2007,6 +2104,27 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return -1;
 }
 
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udp_queue_rcv_one_skb(sk, skb);
+
+	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, true);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+		ret = udp_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+	}
+	return 0;
+}
+
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
@@ -2398,11 +2516,15 @@ void udp_destroy_sock(struct sock *sk)
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
-	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
-		void (*encap_destroy)(struct sock *sk);
-		encap_destroy = READ_ONCE(up->encap_destroy);
-		if (encap_destroy)
-			encap_destroy(sk);
+	if (static_branch_unlikely(&udp_encap_needed_key)) {
+		if (up->encap_type) {
+			void (*encap_destroy)(struct sock *sk);
+			encap_destroy = READ_ONCE(up->encap_destroy);
+			if (encap_destroy)
+				encap_destroy(sk);
+		}
+		if (up->encap_enabled)
+			static_branch_dec(&udp_encap_needed_key);
 	}
 }
 
@@ -2447,7 +2569,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			/* FALLTHROUGH */
 		case UDP_ENCAP_L2TPINUDP:
 			up->encap_type = val;
-			udp_encap_enable();
+			lock_sock(sk);
+			udp_tunnel_encap_enable(sk->sk_socket);
+			release_sock(sk);
 			break;
 		default:
 			err = -ENOPROTOOPT;
@@ -2469,6 +2593,14 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 		up->gso_size = val;
 		break;
 
+	case UDP_GRO:
+		lock_sock(sk);
+		if (valbool)
+			udp_tunnel_encap_enable(sk->sk_socket);
+		up->gro_enabled = valbool;
+		release_sock(sk);
+		break;
+
 	/*
 	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
 	 */
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index e7d18b1..3226726 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -7,7 +7,7 @@
 #include <net/inet_common.h>
 
 int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
-void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 
 int udp_v4_get_port(struct sock *sk, unsigned short snum);
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 802f2bc..0646d61 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -343,6 +343,54 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	return segs;
 }
 
+#define UDP_GRO_CNT_MAX 64
+static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+					       struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_hdr(skb);
+	struct sk_buff *pp = NULL;
+	struct udphdr *uh2;
+	struct sk_buff *p;
+
+	/* requires non zero csum, for symmetry with GSO */
+	if (!uh->check) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	/* pull encapsulating udp header */
+	skb_gro_pull(skb, sizeof(struct udphdr));
+	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+
+	list_for_each_entry(p, head, list) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		uh2 = udp_hdr(p);
+
+		/* Match ports only, as csum is always non zero */
+		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		/* Terminate the flow on len mismatch or if it grows "too much".
+		 * Under small packet flood, the GRO count could otherwise grow a lot,
+		 * leading to excessive truesize values.
+		 */
+		if (!skb_gro_receive(p, skb) &&
+		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
+			pp = p;
+		else if (uh->len != uh2->len)
+			pp = p;
+
+		return pp;
+	}
+
+	/* mismatch, but we never need to flush */
+	return NULL;
+}
+
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 				struct udphdr *uh, udp_lookup_t lookup)
 {
@@ -353,23 +401,27 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	int flush = 1;
 	struct sock *sk;
 
+	rcu_read_lock();
+	sk = (*lookup)(skb, uh->source, uh->dest);
+	if (!sk)
+		goto out_unlock;
+
+	if (udp_sk(sk)->gro_enabled) {
+		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
+		rcu_read_unlock();
+		return pp;
+	}
+
 	if (NAPI_GRO_CB(skb)->encap_mark ||
 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
-	     !NAPI_GRO_CB(skb)->csum_valid))
-		goto out;
+	     !NAPI_GRO_CB(skb)->csum_valid) ||
+	    !udp_sk(sk)->gro_receive)
+		goto out_unlock;
 
 	/* mark that this skb passed once through the tunnel gro layer */
 	NAPI_GRO_CB(skb)->encap_mark = 1;
 
-	rcu_read_lock();
-	sk = (*lookup)(skb, uh->source, uh->dest);
-
-	if (sk && udp_sk(sk)->gro_receive)
-		goto unflush;
-	goto out_unlock;
-
-unflush:
 	flush = 0;
 
 	list_for_each_entry(p, head, list) {
@@ -394,7 +446,6 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 
 out_unlock:
 	rcu_read_unlock();
-out:
 	skb_gro_flush_final(skb, pp, flush);
 	return pp;
 }
@@ -427,6 +478,19 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head,
 	return NULL;
 }
 
+static int udp_gro_complete_segment(struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	skb->csum_start = (unsigned char *)uh - skb->head;
+	skb->csum_offset = offsetof(struct udphdr, check);
+	skb->ip_summed = CHECKSUM_PARTIAL;
+
+	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+	return 0;
+}
+
 int udp_gro_complete(struct sk_buff *skb, int nhoff,
 		     udp_lookup_t lookup)
 {
@@ -437,16 +501,21 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 
 	uh->len = newlen;
 
-	/* Set encapsulation before calling into inner gro_complete() functions
-	 * to make them set up the inner offsets.
-	 */
-	skb->encapsulation = 1;
-
 	rcu_read_lock();
 	sk = (*lookup)(skb, uh->source, uh->dest);
-	if (sk && udp_sk(sk)->gro_complete)
+	if (sk && udp_sk(sk)->gro_enabled) {
+		err = udp_gro_complete_segment(skb);
+	} else if (sk && udp_sk(sk)->gro_complete) {
+		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
+					: SKB_GSO_UDP_TUNNEL;
+
+		/* Set encapsulation before calling into inner gro_complete()
+		 * functions to make them set up the inner offsets.
+		 */
+		skb->encapsulation = 1;
 		err = udp_sk(sk)->gro_complete(sk, skb,
 				nhoff + sizeof(struct udphdr));
+	}
 	rcu_read_unlock();
 
 	if (skb->remcsum_offload)
@@ -461,13 +530,9 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
-	if (uh->check) {
-		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+	if (uh->check)
 		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
 					  iph->daddr, 0);
-	} else {
-		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
-	}
 
 	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
 }
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 6539ff1..d0c412f 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -68,6 +68,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 
 	udp_sk(sk)->encap_type = cfg->encap_type;
 	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
 	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
 	udp_sk(sk)->gro_receive = cfg->gro_receive;
 	udp_sk(sk)->gro_complete = cfg->gro_complete;
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 8545457..39c7f17 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -25,9 +25,9 @@ static int udplite_rcv(struct sk_buff *skb)
 	return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
 }
 
-static void udplite_err(struct sk_buff *skb, u32 info)
+static int udplite_err(struct sk_buff *skb, u32 info)
 {
-	__udp4_lib_err(skb, info, &udplite_table);
+	return __udp4_lib_err(skb, info, &udplite_table);
 }
 
 static const struct net_protocol udplite_protocol = {
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 8dd0e6ab..35c5486 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -106,13 +106,15 @@ static int xfrm4_esp_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm4_esp_err(struct sk_buff *skb, u32 info)
+static int xfrm4_esp_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm4_protocol *handler;
 
 	for_each_protocol_rcu(esp4_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static int xfrm4_ah_rcv(struct sk_buff *skb)
@@ -132,13 +134,15 @@ static int xfrm4_ah_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm4_ah_err(struct sk_buff *skb, u32 info)
+static int xfrm4_ah_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm4_protocol *handler;
 
 	for_each_protocol_rcu(ah4_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
@@ -158,13 +162,15 @@ static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
+static int xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm4_protocol *handler;
 
 	for_each_protocol_rcu(ipcomp4_handlers, handler)
 		if (!handler->err_handler(skb, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static const struct net_protocol esp4_protocol = {
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 9499905..cca3b36 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -433,7 +433,6 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 			 const struct in6_addr *addr)
 {
-	unsigned int hash = inet6_acaddr_hash(net, addr);
 	struct net_device *nh_dev;
 	struct ifacaddr6 *aca;
 	bool found = false;
@@ -441,7 +440,9 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 	rcu_read_lock();
 	if (dev)
 		found = ipv6_chk_acast_dev(dev, addr);
-	else
+	else {
+		unsigned int hash = inet6_acaddr_hash(net, addr);
+
 		hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
 					 aca_addr_lst) {
 			nh_dev = fib6_info_nh_dev(aca->aca_rt);
@@ -452,6 +453,7 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 				break;
 			}
 		}
+	}
 	rcu_read_unlock();
 	return found;
 }
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1ede7a1..bde08aa 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -772,6 +772,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		case IPV6_2292PKTINFO:
 		    {
 			struct net_device *dev = NULL;
+			int src_idx;
 
 			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
 				err = -EINVAL;
@@ -779,12 +780,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 			}
 
 			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+			src_idx = src_info->ipi6_ifindex;
 
-			if (src_info->ipi6_ifindex) {
+			if (src_idx) {
 				if (fl6->flowi6_oif &&
-				    src_info->ipi6_ifindex != fl6->flowi6_oif)
+				    src_idx != fl6->flowi6_oif &&
+				    (sk->sk_bound_dev_if != fl6->flowi6_oif ||
+				     !sk_dev_equal_l3scope(sk, src_idx)))
 					return -EINVAL;
-				fl6->flowi6_oif = src_info->ipi6_ifindex;
+				fl6->flowi6_oif = src_idx;
 			}
 
 			addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 6de3c04..bd675c6 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -4,6 +4,7 @@
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/udp.h>
+#include <linux/icmpv6.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <net/fou.h>
@@ -69,14 +70,87 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 	return 0;
 }
 
+static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
+				  struct inet6_skb_parm *opt,
+				  u8 type, u8 code, int offset, u32 info)
+{
+	const struct inet6_protocol *ipprot;
+
+	ipprot = rcu_dereference(inet6_protos[proto]);
+	if (ipprot && ipprot->err_handler) {
+		if (!ipprot->err_handler(skb, opt, type, code, offset, info))
+			return 0;
+	}
+
+	return -ENOENT;
+}
+
+static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		    u8 type, u8 code, int offset, __be32 info)
+{
+	int transport_offset = skb_transport_offset(skb);
+	struct guehdr *guehdr;
+	size_t optlen;
+	int ret;
+
+	if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+		return -EINVAL;
+
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+	switch (guehdr->version) {
+	case 0: /* Full GUE header present */
+		break;
+	case 1: {
+		/* Direct encapsulation of IPv4 or IPv6 */
+		skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
+
+		switch (((struct iphdr *)guehdr)->version) {
+		case 4:
+			ret = gue6_err_proto_handler(IPPROTO_IPIP, skb, opt,
+						     type, code, offset, info);
+			goto out;
+		case 6:
+			ret = gue6_err_proto_handler(IPPROTO_IPV6, skb, opt,
+						     type, code, offset, info);
+			goto out;
+		default:
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+	}
+	default: /* Undefined version */
+		return -EOPNOTSUPP;
+	}
+
+	if (guehdr->control)
+		return -ENOENT;
+
+	optlen = guehdr->hlen << 2;
+
+	if (validate_gue_flags(guehdr, optlen))
+		return -EINVAL;
+
+	skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
+	ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
+				     opt, type, code, offset, info);
+
+out:
+	skb_set_transport_header(skb, transport_offset);
+	return ret;
+}
+
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
 	.encap_hlen = fou_encap_hlen,
 	.build_header = fou6_build_header,
+	.err_handler = gue6_err,
 };
 
 static const struct ip6_tnl_encap_ops gue_ip6tun_ops = {
 	.encap_hlen = gue_encap_hlen,
 	.build_header = gue6_build_header,
+	.err_handler = gue6_err,
 };
 
 static int ip6_tnl_encap_add_fou_ops(void)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index c9c53ad..5d7aa2c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -84,7 +84,7 @@ static inline struct sock *icmpv6_sk(struct net *net)
 	return net->ipv6.icmp_sk[smp_processor_id()];
 }
 
-static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		       u8 type, u8 code, int offset, __be32 info)
 {
 	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
@@ -100,6 +100,8 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!(type & ICMPV6_INFOMSG_MASK))
 		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
 			ping_err(skb, offset, ntohl(info));
+
+	return 0;
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 3d7c746..5eeeba7 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -99,6 +99,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 				const int dif, const int sdif, bool exact_dif)
 {
 	int score = -1;
+	bool dev_match;
 
 	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
 	    sk->sk_family == PF_INET6) {
@@ -109,15 +110,12 @@ static inline int compute_score(struct sock *sk, struct net *net,
 				return -1;
 			score++;
 		}
-		if (sk->sk_bound_dev_if || exact_dif) {
-			bool dev_match = (sk->sk_bound_dev_if == dif ||
-					  sk->sk_bound_dev_if == sdif);
+		dev_match = inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+						 dif, sdif);
+		if (!dev_match)
+			return -1;
+		score++;
 
-			if (!dev_match)
-				return -1;
-			if (sk->sk_bound_dev_if)
-				score++;
-		}
 		if (sk->sk_incoming_cpu == raw_smp_processor_id())
 			score++;
 	}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 515adbd..81b69bc 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -423,7 +423,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 }
 
 
-static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		       u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
@@ -433,13 +433,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
 			     offset) < 0)
-		return;
+		return -EINVAL;
 
 	ipv6h = (const struct ipv6hdr *)skb->data;
 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
 				 tpi.key, tpi.proto);
 	if (!t)
-		return;
+		return -ENOENT;
 
 	switch (type) {
 		struct ipv6_tlv_tnl_enc_lim *tel;
@@ -449,14 +449,14 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 				    t->parms.name);
 		if (code != ICMPV6_PORT_UNREACH)
 			break;
-		return;
+		return 0;
 	case ICMPV6_TIME_EXCEED:
 		if (code == ICMPV6_EXC_HOPLIMIT) {
 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
 					    t->parms.name);
 			break;
 		}
-		return;
+		return 0;
 	case ICMPV6_PARAMPROB:
 		teli = 0;
 		if (code == ICMPV6_HDR_FIELD)
@@ -472,14 +472,14 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
 					    t->parms.name);
 		}
-		return;
+		return 0;
 	case ICMPV6_PKT_TOOBIG:
 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
-		return;
+		return 0;
 	case NDISC_REDIRECT:
 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
 			     sock_net_uid(net, NULL));
-		return;
+		return 0;
 	}
 
 	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -487,6 +487,8 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	else
 		t->err_count = 1;
 	t->err_time = jiffies;
+
+	return 0;
 }
 
 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 96577e7..3c06cc9 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -319,28 +319,26 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
 /*
  *	Deliver the packet to the host
  */
-
-
-static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
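+/* Deliver the skb to the registered transport protocol handlers. Must be
+ * called under rcu_read_lock(). When have_final is true the caller passes
+ * the final protocol number in nexthdr and the skb data already points at
+ * the transport header.
+ */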
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+			      bool have_final)
 {
 	const struct inet6_protocol *ipprot;
 	struct inet6_dev *idev;
 	unsigned int nhoff;
-	int nexthdr;
 	bool raw;
-	bool have_final = false;
 
 	/*
 	 *	Parse extension headers
 	 */
 
-	rcu_read_lock();
 resubmit:
 	idev = ip6_dst_idev(skb_dst(skb));
-	if (!pskb_pull(skb, skb_transport_offset(skb)))
-		goto discard;
 	nhoff = IP6CB(skb)->nhoff;
-	nexthdr = skb_network_header(skb)[nhoff];
+	if (!have_final) {
+		if (!pskb_pull(skb, skb_transport_offset(skb)))
+			goto discard;
+		nexthdr = skb_network_header(skb)[nhoff];
+	}
 
 resubmit_final:
 	raw = raw6_local_deliver(skb, nexthdr);
@@ -359,6 +357,8 @@ static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *sk
 			}
 		} else if (ipprot->flags & INET6_PROTO_FINAL) {
 			const struct ipv6hdr *hdr;
+			int sdif = inet6_sdif(skb);
+			struct net_device *dev;
 
 			/* Only do this once for first final protocol */
 			have_final = true;
@@ -371,9 +371,19 @@ static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *sk
 			skb_postpull_rcsum(skb, skb_network_header(skb),
 					   skb_network_header_len(skb));
 			hdr = ipv6_hdr(skb);
+
+			/* skb->dev passed may be the master dev for VRFs. */
+			if (sdif) {
+				dev = dev_get_by_index_rcu(net, sdif);
+				if (!dev)
+					goto discard;
+			} else {
+				dev = skb->dev;
+			}
+
 			if (ipv6_addr_is_multicast(&hdr->daddr) &&
-			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
-			    &hdr->saddr) &&
+			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
+						 &hdr->saddr) &&
 			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
 				goto discard;
 		}
@@ -411,13 +421,19 @@ static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *sk
 			consume_skb(skb);
 		}
 	}
-	rcu_read_unlock();
-	return 0;
+	return;
 
 discard:
 	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
-	rcu_read_unlock();
 	kfree_skb(skb);
+}
+
+static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	rcu_read_lock();
+	ip6_protocol_deliver_rcu(net, skb, 0, false);
+	rcu_read_unlock();
+
 	return 0;
 }
 
@@ -432,15 +448,32 @@ EXPORT_SYMBOL_GPL(ip6_input);
 
 int ip6_mc_input(struct sk_buff *skb)
 {
+	int sdif = inet6_sdif(skb);
 	const struct ipv6hdr *hdr;
+	struct net_device *dev;
 	bool deliver;
 
 	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
 			 __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
 			 skb->len);
 
+	/* skb->dev passed may be the master dev for VRFs. */
+	if (sdif) {
+		rcu_read_lock();
+		dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
+		if (!dev) {
+			rcu_read_unlock();
+			kfree_skb(skb);
+			return -ENODEV;
+		}
+	} else {
+		dev = skb->dev;
+	}
+
 	hdr = ipv6_hdr(skb);
-	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
+	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);
+	if (sdif)
+		rcu_read_unlock();
 
 #ifdef CONFIG_IPV6_MROUTE
 	/*
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index c7e495f..70f525c3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -229,14 +229,21 @@ static struct sk_buff *ipv6_gro_receive(struct list_head *head,
 		 * XXX skbs on the gro_list have all been parsed and pulled
 		 * already so we don't need to compare nlen
 		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
-		 * memcmp() alone below is suffcient, right?
+		 * memcmp() alone below is sufficient, right?
 		 */
 		 if ((first_word & htonl(0xF00FFFFF)) ||
-		    memcmp(&iph->nexthdr, &iph2->nexthdr,
-			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+not_same_flow:
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
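+		/* Also compare any IPv6 extension headers beyond the fixed header */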
+		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
+			if (memcmp(iph + 1, iph2 + 1,
+				   nlen - sizeof(struct ipv6hdr)))
+				goto not_same_flow;
+		}
 		/* flush if Traffic Class fields are different */
 		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
 		NAPI_GRO_CB(p)->flush |= flush;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 381ce38..973e215 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -486,7 +486,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 				retv = -EFAULT;
 				break;
 		}
-		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
+		if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex))
 			goto e_inval;
 
 		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5e0efd3..aed7eb5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -86,9 +86,8 @@ struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
 				continue;
 
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != dif &&
-			    sk->sk_bound_dev_if != sdif)
+			if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+						 dif, sdif))
 				continue;
 
 			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 059f053..194bc16 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2977,7 +2977,8 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 	if (!rt)
 		goto out;
 
-	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+					       extack);
 	if (IS_ERR(rt->fib6_metrics)) {
 		err = PTR_ERR(rt->fib6_metrics);
 		/* Do not leave garbage there. */
@@ -3710,7 +3711,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
 	if (!f6i)
 		return ERR_PTR(-ENOMEM);
 
-	f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
+	f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0, NULL);
 	f6i->dst_nocount = true;
 	f6i->dst_host = true;
 	f6i->fib6_protocol = RTPROT_KERNEL;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 03e6b7a..a3f55916 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -349,7 +349,7 @@ static void tcp_v6_mtu_reduced(struct sock *sk)
 	}
 }
 
-static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		u8 type, u8 code, int offset, __be32 info)
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
@@ -371,17 +371,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!sk) {
 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 				  ICMP6_MIB_INERRORS);
-		return;
+		return -ENOENT;
 	}
 
 	if (sk->sk_state == TCP_TIME_WAIT) {
 		inet_twsk_put(inet_twsk(sk));
-		return;
+		return 0;
 	}
 	seq = ntohl(th->seq);
 	fatal = icmpv6_err_convert(type, code, &err);
-	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq, fatal);
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		tcp_req_err(sk, seq, fatal);
+		return 0;
+	}
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -467,6 +469,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
+	return 0;
 }
 
 
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index dae25ca..1991ded 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -134,24 +134,28 @@ static int tunnel46_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_tunnel *handler;
 
 	for_each_tunnel_rcu(tunnel6_handlers, handler)
 		if (!handler->err_handler(skb, opt, type, code, offset, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
-static void tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			 u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_tunnel *handler;
 
 	for_each_tunnel_rcu(tunnel46_handlers, handler)
 		if (!handler->err_handler(skb, opt, type, code, offset, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static const struct inet6_protocol tunnel6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d2d97d0..09cba4c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -45,6 +45,7 @@
 #include <net/raw.h>
 #include <net/tcp_states.h>
 #include <net/ip6_checksum.h>
+#include <net/ip6_tunnel.h>
 #include <net/xfrm.h>
 #include <net/inet_hashtables.h>
 #include <net/inet6_hashtables.h>
@@ -117,6 +118,7 @@ static int compute_score(struct sock *sk, struct net *net,
 {
 	int score;
 	struct inet_sock *inet;
+	bool dev_match;
 
 	if (!net_eq(sock_net(sk), net) ||
 	    udp_sk(sk)->udp_port_hash != hnum ||
@@ -144,15 +146,10 @@ static int compute_score(struct sock *sk, struct net *net,
 		score++;
 	}
 
-	if (sk->sk_bound_dev_if || exact_dif) {
-		bool dev_match = (sk->sk_bound_dev_if == dif ||
-				  sk->sk_bound_dev_if == sdif);
-
-		if (!dev_match)
-			return -1;
-		if (sk->sk_bound_dev_if)
-			score++;
-	}
+	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
+	if (!dev_match)
+		return -1;
+	score++;
 
 	if (sk->sk_incoming_cpu == raw_smp_processor_id())
 		score++;
@@ -329,6 +326,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
+	struct udp_mib *mib;
 	int is_udp4;
 
 	if (flags & MSG_ERRQUEUE)
@@ -352,6 +350,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		msg->msg_flags |= MSG_TRUNC;
 
 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
+	mib = __UDPX_MIB(sk, is_udp4);
 
 	/*
 	 * If checksum is needed at all, try to do it while copying the
@@ -380,24 +379,13 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (unlikely(err)) {
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
-			if (is_udp4)
-				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
-					      is_udplite);
-			else
-				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
-					       is_udplite);
+			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 		}
 		kfree_skb(skb);
 		return err;
 	}
-	if (!peeked) {
-		if (is_udp4)
-			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
-				      is_udplite);
-		else
-			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
-				       is_udplite);
-	}
+	if (!peeked)
+		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
 
 	sock_recv_ts_and_drops(msg, sk, skb);
 
@@ -421,6 +409,9 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		*addr_len = sizeof(*sin6);
 	}
 
+	if (udp_sk(sk)->gro_enabled)
+		udp_cmsg_recv(msg, sk, skb);
+
 	if (np->rxopt.all)
 		ip6_datagram_recv_common_ctl(sk, msg, skb);
 
@@ -443,17 +434,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 csum_copy_err:
 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
 				 udp_skb_destructor)) {
-		if (is_udp4) {
-			UDP_INC_STATS(sock_net(sk),
-				      UDP_MIB_CSUMERRORS, is_udplite);
-			UDP_INC_STATS(sock_net(sk),
-				      UDP_MIB_INERRORS, is_udplite);
-		} else {
-			UDP6_INC_STATS(sock_net(sk),
-				       UDP_MIB_CSUMERRORS, is_udplite);
-			UDP6_INC_STATS(sock_net(sk),
-				       UDP_MIB_INERRORS, is_udplite);
-		}
+		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
+		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 	}
 	kfree_skb(skb);
 
@@ -463,15 +445,106 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	goto try_again;
 }
 
-void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		    u8 type, u8 code, int offset, __be32 info,
-		    struct udp_table *udptable)
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
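+/* Reference counted (static_branch_inc) so udpv6_destroy_sock() can drop a
+ * socket's reference again via static_branch_dec() when encap is enabled.
+ */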
+void udpv6_encap_enable(void)
+{
+	static_branch_inc(&udpv6_encap_needed_key);
+}
+EXPORT_SYMBOL(udpv6_encap_enable);
+
+/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
+ * through error handlers in encapsulations looking for a match.
+ */
+static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
+				      struct inet6_skb_parm *opt,
+				      u8 type, u8 code, int offset, u32 info)
+{
+	int i;
+
+	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
+		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			       u8 type, u8 code, int offset, u32 info);
+
+		if (!ip6tun_encaps[i])
+			continue;
+		handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+		if (handler && !handler(skb, opt, type, code, offset, info))
+			return 0;
+	}
+
+	return -ENOENT;
+}
+
+/* Try to match ICMP errors to UDP tunnels by looking up a socket without
+ * reversing source and destination port: this will match tunnels that force the
+ * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
+ * lwtunnels might actually break this assumption by being configured with
+ * different destination ports on endpoints; in this case we won't be able to
+ * trace ICMP messages back to them.
+ *
+ * If this doesn't match any socket, probe tunnels with arbitrary destination
+ * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
+ * we've sent packets to won't necessarily match the local destination port.
+ *
+ * Then ask the tunnel implementation to match the error against a valid
+ * association.
+ *
+ * Return an error if we can't find a match, the socket if we need further
+ * processing, zero otherwise.
+ */
+static struct sock *__udp6_lib_err_encap(struct net *net,
+					 const struct ipv6hdr *hdr, int offset,
+					 struct udphdr *uh,
+					 struct udp_table *udptable,
+					 struct sk_buff *skb,
+					 struct inet6_skb_parm *opt,
+					 u8 type, u8 code, __be32 info)
+{
+	int network_offset, transport_offset;
+	struct sock *sk;
+
+	network_offset = skb_network_offset(skb);
+	transport_offset = skb_transport_offset(skb);
+
+	/* Network header needs to point to the outer IPv6 header inside ICMP */
+	skb_reset_network_header(skb);
+
+	/* Transport header needs to point to the UDP header */
+	skb_set_transport_header(skb, offset);
+
+	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
+			       &hdr->saddr, uh->dest,
+			       inet6_iif(skb), 0, udptable, skb);
+	if (sk) {
+		int (*lookup)(struct sock *sk, struct sk_buff *skb);
+		struct udp_sock *up = udp_sk(sk);
+
+		lookup = READ_ONCE(up->encap_err_lookup);
+		if (!lookup || lookup(sk, skb))
+			sk = NULL;
+	}
+
+	if (!sk) {
+		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
+							offset, info));
+	}
+
+	skb_set_transport_header(skb, transport_offset);
+	skb_set_network_header(skb, network_offset);
+
+	return sk;
+}
+
+int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		   u8 type, u8 code, int offset, __be32 info,
+		   struct udp_table *udptable)
 {
 	struct ipv6_pinfo *np;
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 	const struct in6_addr *saddr = &hdr->saddr;
 	const struct in6_addr *daddr = &hdr->daddr;
 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
+	bool tunnel = false;
 	struct sock *sk;
 	int harderr;
 	int err;
@@ -480,9 +553,23 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 			       inet6_iif(skb), inet6_sdif(skb), udptable, skb);
 	if (!sk) {
-		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
-				  ICMP6_MIB_INERRORS);
-		return;
+		/* No socket for error: try tunnels before discarding */
+		sk = ERR_PTR(-ENOENT);
+		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+						  udptable, skb,
+						  opt, type, code, info);
+			if (!sk)
+				return 0;
+		}
+
+		if (IS_ERR(sk)) {
+			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+					  ICMP6_MIB_INERRORS);
+			return PTR_ERR(sk);
+		}
+
+		tunnel = true;
 	}
 
 	harderr = icmpv6_err_convert(type, code, &err);
@@ -496,10 +583,19 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			harderr = 1;
 	}
 	if (type == NDISC_REDIRECT) {
-		ip6_sk_redirect(skb, sk);
+		if (tunnel) {
+			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
+				     sk->sk_mark, sk->sk_uid);
+		} else {
+			ip6_sk_redirect(skb, sk);
+		}
 		goto out;
 	}
 
+	/* Tunnels don't have an application socket: don't pass errors back */
+	if (tunnel)
+		goto out;
+
 	if (!np->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
@@ -510,7 +606,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
-	return;
+	return 0;
 }
 
 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -541,21 +637,14 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
-static __inline__ void udpv6_err(struct sk_buff *skb,
-				 struct inet6_skb_parm *opt, u8 type,
-				 u8 code, int offset, __be32 info)
+static __inline__ int udpv6_err(struct sk_buff *skb,
+				struct inet6_skb_parm *opt, u8 type,
+				u8 code, int offset, __be32 info)
 {
-	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
+	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }
 
-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
-void udpv6_encap_enable(void)
-{
-	static_branch_enable(&udpv6_encap_needed_key);
-}
-EXPORT_SYMBOL(udpv6_encap_enable);
-
-static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
@@ -638,10 +727,32 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return -1;
 }
 
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udpv6_queue_rcv_one_skb(sk, skb);
+
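+	/* A GSO packet reached a socket that cannot accept it: segment the skb
+	 * and queue each resulting segment individually. A positive return
+	 * value from the per-segment handler requests delivery to another
+	 * protocol, handled via ip6_protocol_deliver_rcu().
+	 */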
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, false);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+
+		ret = udpv6_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
+						 true);
+	}
+	return 0;
+}
+
 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 				   __be16 loc_port, const struct in6_addr *loc_addr,
 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
-				   int dif, unsigned short hnum)
+				   int dif, int sdif, unsigned short hnum)
 {
 	struct inet_sock *inet = inet_sk(sk);
 
@@ -653,7 +764,7 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 		return false;
@@ -687,6 +798,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	unsigned int offset = offsetof(typeof(*sk), sk_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
 	int dif = inet6_iif(skb);
+	int sdif = inet6_sdif(skb);
 	struct hlist_node *node;
 	struct sk_buff *nskb;
 
@@ -701,7 +813,8 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 
 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
-					    uh->source, saddr, dif, hnum))
+					    uh->source, saddr, dif, sdif,
+					    hnum))
 			continue;
 		/* If zero checksum and no_check is not on for
 		 * the socket then skip it.
@@ -1458,11 +1571,15 @@ void udpv6_destroy_sock(struct sock *sk)
 	udp_v6_flush_pending_frames(sk);
 	release_sock(sk);
 
-	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
-		void (*encap_destroy)(struct sock *sk);
-		encap_destroy = READ_ONCE(up->encap_destroy);
-		if (encap_destroy)
-			encap_destroy(sk);
+	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+		if (up->encap_type) {
+			void (*encap_destroy)(struct sock *sk);
+			encap_destroy = READ_ONCE(up->encap_destroy);
+			if (encap_destroy)
+				encap_destroy(sk);
+		}
+		if (up->encap_enabled)
+			static_branch_dec(&udpv6_encap_needed_key);
 	}
 
 	inet6_destroy_sock(sk);
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 7903e21..5730e65 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -9,8 +9,8 @@
 #include <net/transp_v6.h>
 
 int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
-void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
-		    __be32, struct udp_table *);
+int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+		   __be32, struct udp_table *);
 
 int udp_v6_get_port(struct sock *sk, unsigned short snum);
 
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 1b8e161..828b245 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -147,13 +147,9 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
-	if (uh->check) {
-		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+	if (uh->check)
 		uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
 					  &ipv6h->daddr, 0);
-	} else {
-		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
-	}
 
 	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
 }
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 5000ad6..a125aeb 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -20,11 +20,12 @@ static int udplitev6_rcv(struct sk_buff *skb)
 	return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
 }
 
-static void udplitev6_err(struct sk_buff *skb,
+static int udplitev6_err(struct sk_buff *skb,
 			  struct inet6_skb_parm *opt,
 			  u8 type, u8 code, int offset, __be32 info)
 {
-	__udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
+	return __udp6_lib_err(skb, opt, type, code, offset, info,
+			      &udplite_table);
 }
 
 static const struct inet6_protocol udplitev6_protocol = {
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index b2dc8ce..cc979b7 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -80,14 +80,16 @@ static int xfrm6_esp_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			  u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_protocol *handler;
 
 	for_each_protocol_rcu(esp6_handlers, handler)
 		if (!handler->err_handler(skb, opt, type, code, offset, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static int xfrm6_ah_rcv(struct sk_buff *skb)
@@ -107,14 +109,16 @@ static int xfrm6_ah_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			 u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_protocol *handler;
 
 	for_each_protocol_rcu(ah6_handlers, handler)
 		if (!handler->err_handler(skb, opt, type, code, offset, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
@@ -134,14 +138,16 @@ static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static void xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			     u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_protocol *handler;
 
 	for_each_protocol_rcu(ipcomp6_handlers, handler)
 		if (!handler->err_handler(skb, opt, type, code, offset, info))
-			break;
+			return 0;
+
+	return -ENOENT;
 }
 
 static const struct inet6_protocol esp6_protocol = {
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 0bed4cc..78ea5a7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1873,30 +1873,26 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
 	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
-	struct sk_buff *list_skb = list->next;
+	struct sk_buff *list_skb;
 	unsigned long flags;
 
 	bh_lock_sock(sk);
-	if (!skb_queue_empty(list)) {
-		spin_lock_irqsave(&list->lock, flags);
 
-		while (list_skb != (struct sk_buff *)list) {
-			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
-				this = list_skb;
-				break;
-			}
-			list_skb = list_skb->next;
+	spin_lock_irqsave(&list->lock, flags);
+	skb_queue_walk(list, list_skb) {
+		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+			this = list_skb;
+			break;
 		}
-		if (this)
-			__skb_unlink(this, list);
+	}
+	if (this)
+		__skb_unlink(this, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 
-		spin_unlock_irqrestore(&list->lock, flags);
-
-		if (this) {
-			kfree_skb(this);
-			/* wake up any process waiting for sending */
-			iucv_sock_wake_msglim(sk);
-		}
+	if (this) {
+		kfree_skb(this);
+		/* wake up any process waiting for sending */
+		iucv_sock_wake_msglim(sk);
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
@@ -2284,11 +2280,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 
 	list = &iucv->send_skb_q;
 	spin_lock_irqsave(&list->lock, flags);
-	if (skb_queue_empty(list))
-		goto out_unlock;
-	list_skb = list->next;
-	nskb = list_skb->next;
-	while (list_skb != (struct sk_buff *)list) {
+	skb_queue_walk_safe(list, list_skb, nskb) {
 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
 			switch (n) {
 			case TX_NOTIFY_OK:
@@ -2321,10 +2313,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			}
 			break;
 		}
-		list_skb = nskb;
-		nskb = nskb->next;
 	}
-out_unlock:
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 1dae77c..9e3642b 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -222,6 +222,10 @@ struct ncsi_package {
 	unsigned int         channel_num; /* Number of channels     */
 	struct list_head     channels;    /* List of channels       */
 	struct list_head     node;        /* Form list of packages  */
+
+	bool                 multi_channel; /* Enable multiple channels  */
+	u32                  channel_whitelist; /* Channels to configure */
+	struct ncsi_channel  *preferred_channel; /* Primary channel      */
 };
 
 struct ncsi_request {
@@ -287,16 +291,16 @@ struct ncsi_dev_priv {
 #define NCSI_DEV_PROBED		1            /* Finalized NCSI topology    */
 #define NCSI_DEV_HWA		2            /* Enabled HW arbitration     */
 #define NCSI_DEV_RESHUFFLE	4
+#define NCSI_DEV_RESET		8            /* Reset state of NC          */
 	unsigned int        gma_flag;        /* OEM GMA flag               */
 	spinlock_t          lock;            /* Protect the NCSI device    */
 #if IS_ENABLED(CONFIG_IPV6)
 	unsigned int        inet6_addr_num;  /* Number of IPv6 addresses   */
 #endif
+	unsigned int        package_probe_id;/* Current ID during probe    */
 	unsigned int        package_num;     /* Number of packages         */
 	struct list_head    packages;        /* List of packages           */
 	struct ncsi_channel *hot_channel;    /* Channel was ever active    */
-	struct ncsi_package *force_package;  /* Force a specific package   */
-	struct ncsi_channel *force_channel;  /* Force a specific channel   */
 	struct ncsi_request requests[256];   /* Request table              */
 	unsigned int        request_id;      /* Last used request ID       */
 #define NCSI_REQ_START_IDX	1
@@ -309,6 +313,9 @@ struct ncsi_dev_priv {
 	struct list_head    node;            /* Form NCSI device list      */
 #define NCSI_MAX_VLAN_VIDS	15
 	struct list_head    vlan_vids;       /* List of active VLAN IDs */
+
+	bool                multi_package;   /* Enable multiple packages   */
+	u32                 package_whitelist; /* Packages to configure    */
 };
 
 struct ncsi_cmd_arg {
@@ -341,6 +348,7 @@ extern spinlock_t ncsi_dev_lock;
 	list_for_each_entry_rcu(nc, &np->channels, node)
 
 /* Resources */
+int ncsi_reset_dev(struct ncsi_dev *nd);
 void ncsi_start_channel_monitor(struct ncsi_channel *nc);
 void ncsi_stop_channel_monitor(struct ncsi_channel *nc);
 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
@@ -361,6 +369,13 @@ struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 void ncsi_free_request(struct ncsi_request *nr);
 struct ncsi_dev *ncsi_find_dev(struct net_device *dev);
 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp);
+bool ncsi_channel_has_link(struct ncsi_channel *channel);
+bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
+			  struct ncsi_channel *channel);
+int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
+			   struct ncsi_package *np,
+			   struct ncsi_channel *disable,
+			   struct ncsi_channel *enable);
 
 /* Packet handlers */
 u32 ncsi_calculate_checksum(unsigned char *data, int len);
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 25e483e..26d67e2 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -50,13 +50,15 @@ static int ncsi_validate_aen_pkt(struct ncsi_aen_pkt_hdr *h,
 static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
 				struct ncsi_aen_pkt_hdr *h)
 {
-	struct ncsi_aen_lsc_pkt *lsc;
-	struct ncsi_channel *nc;
+	struct ncsi_channel *nc, *tmp;
 	struct ncsi_channel_mode *ncm;
+	unsigned long old_data, data;
+	struct ncsi_aen_lsc_pkt *lsc;
+	struct ncsi_package *np;
+	bool had_link, has_link;
+	unsigned long flags;
 	bool chained;
 	int state;
-	unsigned long old_data, data;
-	unsigned long flags;
 
 	/* Find the NCSI channel */
 	ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc);
@@ -73,6 +75,9 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
 	ncm->data[2] = data;
 	ncm->data[4] = ntohl(lsc->oem_status);
 
+	had_link = !!(old_data & 0x1);
+	has_link = !!(data & 0x1);
+
 	netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
 		   nc->id, data & 0x1 ? "up" : "down");
 
@@ -80,22 +85,60 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
 	state = nc->state;
 	spin_unlock_irqrestore(&nc->lock, flags);
 
-	if (!((old_data ^ data) & 0x1) || chained)
-		return 0;
-	if (!(state == NCSI_CHANNEL_INACTIVE && (data & 0x1)) &&
-	    !(state == NCSI_CHANNEL_ACTIVE && !(data & 0x1)))
+	if (state == NCSI_CHANNEL_INACTIVE)
+		netdev_warn(ndp->ndev.dev,
+			    "NCSI: Inactive channel %u received AEN!\n",
+			    nc->id);
+
+	if ((had_link == has_link) || chained)
 		return 0;
 
-	if (!(ndp->flags & NCSI_DEV_HWA) &&
-	    state == NCSI_CHANNEL_ACTIVE)
-		ndp->flags |= NCSI_DEV_RESHUFFLE;
+	if (!ndp->multi_package && !nc->package->multi_channel) {
+		if (had_link) {
+			ndp->flags |= NCSI_DEV_RESHUFFLE;
+			ncsi_stop_channel_monitor(nc);
+			spin_lock_irqsave(&ndp->lock, flags);
+			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
+			spin_unlock_irqrestore(&ndp->lock, flags);
+			return ncsi_process_next_channel(ndp);
+		}
+		/* Configured channel came up */
+		return 0;
+	}
 
-	ncsi_stop_channel_monitor(nc);
-	spin_lock_irqsave(&ndp->lock, flags);
-	list_add_tail_rcu(&nc->link, &ndp->channel_queue);
-	spin_unlock_irqrestore(&ndp->lock, flags);
+	if (had_link) {
+		ncm = &nc->modes[NCSI_MODE_TX_ENABLE];
+		if (ncsi_channel_is_last(ndp, nc)) {
+			/* No channels left, reconfigure */
+			return ncsi_reset_dev(&ndp->ndev);
+		} else if (ncm->enable) {
+			/* Need to failover Tx channel */
+			ncsi_update_tx_channel(ndp, nc->package, nc, NULL);
+		}
+	} else if (has_link && nc->package->preferred_channel == nc) {
+		/* Return Tx to preferred channel */
+		ncsi_update_tx_channel(ndp, nc->package, NULL, nc);
+	} else if (has_link) {
+		NCSI_FOR_EACH_PACKAGE(ndp, np) {
+			NCSI_FOR_EACH_CHANNEL(np, tmp) {
+				/* Enable Tx on this channel if the current Tx
+				 * channel is down.
+				 */
+				ncm = &tmp->modes[NCSI_MODE_TX_ENABLE];
+				if (ncm->enable &&
+				    !ncsi_channel_has_link(tmp)) {
+					ncsi_update_tx_channel(ndp, nc->package,
+							       tmp, nc);
+					break;
+				}
+			}
+		}
+	}
 
-	return ncsi_process_next_channel(ndp);
+	/* Leave configured channels active in a multi-channel scenario so
+	 * AEN events are still received.
+	 */
+	return 0;
 }
 
 static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index bfc43b2..92e59f0 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -28,6 +28,29 @@
 LIST_HEAD(ncsi_dev_list);
 DEFINE_SPINLOCK(ncsi_dev_lock);
 
+bool ncsi_channel_has_link(struct ncsi_channel *channel)
+{
+	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
+}
+
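+/* Return true if no channel other than @channel is active with link up */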
+bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
+			  struct ncsi_channel *channel)
+{
+	struct ncsi_package *np;
+	struct ncsi_channel *nc;
+
+	NCSI_FOR_EACH_PACKAGE(ndp, np)
+		NCSI_FOR_EACH_CHANNEL(np, nc) {
+			if (nc == channel)
+				continue;
+			if (nc->state == NCSI_CHANNEL_ACTIVE &&
+			    ncsi_channel_has_link(nc))
+				return false;
+		}
+
+	return true;
+}
+
 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
 {
 	struct ncsi_dev *nd = &ndp->ndev;
@@ -52,7 +75,7 @@ static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
 				continue;
 			}
 
-			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+			if (ncsi_channel_has_link(nc)) {
 				spin_unlock_irqrestore(&nc->lock, flags);
 				nd->link_up = 1;
 				goto report;
@@ -113,10 +136,8 @@ static void ncsi_channel_monitor(struct timer_list *t)
 	default:
 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
 			   nc->id);
-		if (!(ndp->flags & NCSI_DEV_HWA)) {
-			ncsi_report_link(ndp, true);
-			ndp->flags |= NCSI_DEV_RESHUFFLE;
-		}
+		ncsi_report_link(ndp, true);
+		ndp->flags |= NCSI_DEV_RESHUFFLE;
 
 		ncsi_stop_channel_monitor(nc);
 
@@ -269,6 +290,7 @@ struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 	np->ndp = ndp;
 	spin_lock_init(&np->lock);
 	INIT_LIST_HEAD(&np->channels);
+	np->channel_whitelist = UINT_MAX;
 
 	spin_lock_irqsave(&ndp->lock, flags);
 	tmp = ncsi_find_package(ndp, id);
@@ -442,12 +464,14 @@ static void ncsi_request_timeout(struct timer_list *t)
 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 {
 	struct ncsi_dev *nd = &ndp->ndev;
-	struct ncsi_package *np = ndp->active_package;
-	struct ncsi_channel *nc = ndp->active_channel;
+	struct ncsi_package *np;
+	struct ncsi_channel *nc, *tmp;
 	struct ncsi_cmd_arg nca;
 	unsigned long flags;
 	int ret;
 
+	np = ndp->active_package;
+	nc = ndp->active_channel;
 	nca.ndp = ndp;
 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 	switch (nd->state) {
@@ -523,6 +547,15 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 		if (ret)
 			goto error;
 
+		NCSI_FOR_EACH_CHANNEL(np, tmp) {
+			/* If there is another channel active on this package
+			 * do not deselect the package.
+			 */
+			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
+				nd->state = ncsi_dev_state_suspend_done;
+				break;
+			}
+		}
 		break;
 	case ncsi_dev_state_suspend_deselect:
 		ndp->pending_req_num = 1;
@@ -541,8 +574,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 		spin_lock_irqsave(&nc->lock, flags);
 		nc->state = NCSI_CHANNEL_INACTIVE;
 		spin_unlock_irqrestore(&nc->lock, flags);
-		ncsi_process_next_channel(ndp);
-
+		if (ndp->flags & NCSI_DEV_RESET)
+			ncsi_reset_dev(nd);
+		else
+			ncsi_process_next_channel(ndp);
 		break;
 	default:
 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
@@ -717,13 +752,144 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 
 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
 
+/* Determine if a given channel from the channel_queue should be used for Tx */
+static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
+			       struct ncsi_channel *nc)
+{
+	struct ncsi_channel_mode *ncm;
+	struct ncsi_channel *channel;
+	struct ncsi_package *np;
+
+	/* Check if any other channel has Tx enabled; a channel may have already
+	 * been configured and removed from the channel queue.
+	 */
+	NCSI_FOR_EACH_PACKAGE(ndp, np) {
+		if (!ndp->multi_package && np != nc->package)
+			continue;
+		NCSI_FOR_EACH_CHANNEL(np, channel) {
+			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
+			if (ncm->enable)
+				return false;
+		}
+	}
+
+	/* This channel is the preferred channel and has link */
+	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
+		np = channel->package;
+		if (np->preferred_channel &&
+		    ncsi_channel_has_link(np->preferred_channel)) {
+			return np->preferred_channel == nc;
+		}
+	}
+
+	/* This channel has link */
+	if (ncsi_channel_has_link(nc))
+		return true;
+
+	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
+		if (ncsi_channel_has_link(channel))
+			return false;
+
+	/* No other channel has link; default to this one */
+	return true;
+}
+
+/* Change the active Tx channel in a multi-channel setup */
+int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
+			   struct ncsi_package *package,
+			   struct ncsi_channel *disable,
+			   struct ncsi_channel *enable)
+{
+	struct ncsi_cmd_arg nca;
+	struct ncsi_channel *nc;
+	struct ncsi_package *np;
+	int ret = 0;
+
+	if (!package->multi_channel && !ndp->multi_package)
+		netdev_warn(ndp->ndev.dev,
+			    "NCSI: Trying to update Tx channel in single-channel mode\n");
+	nca.ndp = ndp;
+	nca.req_flags = 0;
+
+	/* Find current channel with Tx enabled */
+	NCSI_FOR_EACH_PACKAGE(ndp, np) {
+		if (disable)
+			break;
+		if (!ndp->multi_package && np != package)
+			continue;
+
+		NCSI_FOR_EACH_CHANNEL(np, nc)
+			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
+				disable = nc;
+				break;
+			}
+	}
+
+	/* Find a suitable channel for Tx */
+	NCSI_FOR_EACH_PACKAGE(ndp, np) {
+		if (enable)
+			break;
+		if (!ndp->multi_package && np != package)
+			continue;
+		if (!(ndp->package_whitelist & (0x1 << np->id)))
+			continue;
+
+		if (np->preferred_channel &&
+		    ncsi_channel_has_link(np->preferred_channel)) {
+			enable = np->preferred_channel;
+			break;
+		}
+
+		NCSI_FOR_EACH_CHANNEL(np, nc) {
+			if (!(np->channel_whitelist & 0x1 << nc->id))
+				continue;
+			if (nc->state != NCSI_CHANNEL_ACTIVE)
+				continue;
+			if (ncsi_channel_has_link(nc)) {
+				enable = nc;
+				break;
+			}
+		}
+	}
+
+	if (disable == enable)
+		return -1;
+
+	if (!enable)
+		return -1;
+
+	if (disable) {
+		nca.channel = disable->id;
+		nca.package = disable->package->id;
+		nca.type = NCSI_PKT_CMD_DCNT;
+		ret = ncsi_xmit_cmd(&nca);
+		if (ret)
+			netdev_err(ndp->ndev.dev,
+				   "Error %d sending DCNT\n",
+				   ret);
+	}
+
+	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
+
+	nca.channel = enable->id;
+	nca.package = enable->package->id;
+	nca.type = NCSI_PKT_CMD_ECNT;
+	ret = ncsi_xmit_cmd(&nca);
+	if (ret)
+		netdev_err(ndp->ndev.dev,
+			   "Error %d sending ECNT\n",
+			   ret);
+
+	return ret;
+}
+
 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 {
-	struct ncsi_dev *nd = &ndp->ndev;
-	struct net_device *dev = nd->dev;
 	struct ncsi_package *np = ndp->active_package;
 	struct ncsi_channel *nc = ndp->active_channel;
 	struct ncsi_channel *hot_nc = NULL;
+	struct ncsi_dev *nd = &ndp->ndev;
+	struct net_device *dev = nd->dev;
 	struct ncsi_cmd_arg nca;
 	unsigned char index;
 	unsigned long flags;
@@ -845,20 +1011,29 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 		} else if (nd->state == ncsi_dev_state_config_ebf) {
 			nca.type = NCSI_PKT_CMD_EBF;
 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
-			nd->state = ncsi_dev_state_config_ecnt;
+			if (ncsi_channel_is_tx(ndp, nc))
+				nd->state = ncsi_dev_state_config_ecnt;
+			else
+				nd->state = ncsi_dev_state_config_ec;
 #if IS_ENABLED(CONFIG_IPV6)
 			if (ndp->inet6_addr_num > 0 &&
 			    (nc->caps[NCSI_CAP_GENERIC].cap &
 			     NCSI_CAP_GENERIC_MC))
 				nd->state = ncsi_dev_state_config_egmf;
-			else
-				nd->state = ncsi_dev_state_config_ecnt;
 		} else if (nd->state == ncsi_dev_state_config_egmf) {
 			nca.type = NCSI_PKT_CMD_EGMF;
 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
-			nd->state = ncsi_dev_state_config_ecnt;
+			if (ncsi_channel_is_tx(ndp, nc))
+				nd->state = ncsi_dev_state_config_ecnt;
+			else
+				nd->state = ncsi_dev_state_config_ec;
 #endif /* CONFIG_IPV6 */
 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
+			if (np->preferred_channel &&
+			    nc != np->preferred_channel)
+				netdev_info(ndp->ndev.dev,
+					    "NCSI: Tx failed over to channel %u\n",
+					    nc->id);
 			nca.type = NCSI_PKT_CMD_ECNT;
 			nd->state = ncsi_dev_state_config_ec;
 		} else if (nd->state == ncsi_dev_state_config_ec) {
@@ -889,6 +1064,16 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
 			   nc->id);
 		spin_lock_irqsave(&nc->lock, flags);
+		nc->state = NCSI_CHANNEL_ACTIVE;
+
+		if (ndp->flags & NCSI_DEV_RESET) {
+			/* A reset event happened during config, start it now */
+			nc->reconfigure_needed = false;
+			spin_unlock_irqrestore(&nc->lock, flags);
+			ncsi_reset_dev(nd);
+			break;
+		}
+
 		if (nc->reconfigure_needed) {
 			/* This channel's configuration has been updated
 			 * part-way during the config state - start the
@@ -909,10 +1094,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 
 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
 			hot_nc = nc;
-			nc->state = NCSI_CHANNEL_ACTIVE;
 		} else {
 			hot_nc = NULL;
-			nc->state = NCSI_CHANNEL_INACTIVE;
 			netdev_dbg(ndp->ndev.dev,
 				   "NCSI: channel %u link down after config\n",
 				   nc->id);
@@ -940,43 +1123,35 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 
 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
 {
-	struct ncsi_package *np, *force_package;
-	struct ncsi_channel *nc, *found, *hot_nc, *force_channel;
+	struct ncsi_channel *nc, *found, *hot_nc;
 	struct ncsi_channel_mode *ncm;
-	unsigned long flags;
+	unsigned long flags, cflags;
+	struct ncsi_package *np;
+	bool with_link;
 
 	spin_lock_irqsave(&ndp->lock, flags);
 	hot_nc = ndp->hot_channel;
-	force_channel = ndp->force_channel;
-	force_package = ndp->force_package;
 	spin_unlock_irqrestore(&ndp->lock, flags);
 
-	/* Force a specific channel whether or not it has link if we have been
-	 * configured to do so
-	 */
-	if (force_package && force_channel) {
-		found = force_channel;
-		ncm = &found->modes[NCSI_MODE_LINK];
-		if (!(ncm->data[2] & 0x1))
-			netdev_info(ndp->ndev.dev,
-				    "NCSI: Channel %u forced, but it is link down\n",
-				    found->id);
-		goto out;
-	}
-
-	/* The search is done once an inactive channel with up
-	 * link is found.
+	/* By default the search stops once an inactive channel with link up
+	 * is found, unless a preferred channel is set.
+	 * If multi_package or multi_channel are configured, all channels in
+	 * the whitelist are added to the channel queue.
 	 */
 	found = NULL;
+	with_link = false;
 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
-		if (ndp->force_package && np != ndp->force_package)
+		if (!(ndp->package_whitelist & (0x1 << np->id)))
 			continue;
 		NCSI_FOR_EACH_CHANNEL(np, nc) {
-			spin_lock_irqsave(&nc->lock, flags);
+			if (!(np->channel_whitelist & (0x1 << nc->id)))
+				continue;
+
+			spin_lock_irqsave(&nc->lock, cflags);
 
 			if (!list_empty(&nc->link) ||
 			    nc->state != NCSI_CHANNEL_INACTIVE) {
-				spin_unlock_irqrestore(&nc->lock, flags);
+				spin_unlock_irqrestore(&nc->lock, cflags);
 				continue;
 			}
 
@@ -988,32 +1163,49 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
 
 			ncm = &nc->modes[NCSI_MODE_LINK];
 			if (ncm->data[2] & 0x1) {
-				spin_unlock_irqrestore(&nc->lock, flags);
 				found = nc;
-				goto out;
+				with_link = true;
 			}
 
-			spin_unlock_irqrestore(&nc->lock, flags);
+			/* If multi_channel is enabled configure all valid
+			 * channels whether or not they currently have link
+			 * so they will have AENs enabled.
+			 */
+			if (with_link || np->multi_channel) {
+				spin_lock_irqsave(&ndp->lock, flags);
+				list_add_tail_rcu(&nc->link,
+						  &ndp->channel_queue);
+				spin_unlock_irqrestore(&ndp->lock, flags);
+
+				netdev_dbg(ndp->ndev.dev,
+					   "NCSI: Channel %u added to queue (link %s)\n",
+					   nc->id,
+					   ncm->data[2] & 0x1 ? "up" : "down");
+			}
+
+			spin_unlock_irqrestore(&nc->lock, cflags);
+
+			if (with_link && !np->multi_channel)
+				break;
 		}
+		if (with_link && !ndp->multi_package)
+			break;
 	}
 
-	if (!found) {
+	if (list_empty(&ndp->channel_queue) && found) {
+		netdev_info(ndp->ndev.dev,
+			    "NCSI: No channel with link found, configuring channel %u\n",
+			    found->id);
+		spin_lock_irqsave(&ndp->lock, flags);
+		list_add_tail_rcu(&found->link, &ndp->channel_queue);
+		spin_unlock_irqrestore(&ndp->lock, flags);
+	} else if (!found) {
 		netdev_warn(ndp->ndev.dev,
-			    "NCSI: No channel found with link\n");
+			    "NCSI: No channel found to configure!\n");
 		ncsi_report_link(ndp, true);
 		return -ENODEV;
 	}
 
-	ncm = &found->modes[NCSI_MODE_LINK];
-	netdev_dbg(ndp->ndev.dev,
-		   "NCSI: Channel %u added to queue (link %s)\n",
-		   found->id, ncm->data[2] & 0x1 ? "up" : "down");
-
-out:
-	spin_lock_irqsave(&ndp->lock, flags);
-	list_add_tail_rcu(&found->link, &ndp->channel_queue);
-	spin_unlock_irqrestore(&ndp->lock, flags);
-
 	return ncsi_process_next_channel(ndp);
 }
 
@@ -1050,35 +1242,6 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
 	return false;
 }
 
-static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
-{
-	struct ncsi_package *np;
-	struct ncsi_channel *nc;
-	unsigned long flags;
-
-	/* Move all available channels to processing queue */
-	spin_lock_irqsave(&ndp->lock, flags);
-	NCSI_FOR_EACH_PACKAGE(ndp, np) {
-		NCSI_FOR_EACH_CHANNEL(np, nc) {
-			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
-				     !list_empty(&nc->link));
-			ncsi_stop_channel_monitor(nc);
-			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
-		}
-	}
-	spin_unlock_irqrestore(&ndp->lock, flags);
-
-	/* We can have no channels in extremely case */
-	if (list_empty(&ndp->channel_queue)) {
-		netdev_err(ndp->ndev.dev,
-			   "NCSI: No available channels for HWA\n");
-		ncsi_report_link(ndp, false);
-		return -ENOENT;
-	}
-
-	return ncsi_process_next_channel(ndp);
-}
-
 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 {
 	struct ncsi_dev *nd = &ndp->ndev;
@@ -1110,70 +1273,28 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 		nd->state = ncsi_dev_state_probe_package;
 		break;
 	case ncsi_dev_state_probe_package:
-		ndp->pending_req_num = 16;
-
-		/* Select all possible packages */
-		nca.type = NCSI_PKT_CMD_SP;
-		nca.bytes[0] = 1;
-		nca.channel = NCSI_RESERVED_CHANNEL;
-		for (index = 0; index < 8; index++) {
-			nca.package = index;
-			ret = ncsi_xmit_cmd(&nca);
-			if (ret)
-				goto error;
-		}
-
-		/* Disable all possible packages */
-		nca.type = NCSI_PKT_CMD_DP;
-		for (index = 0; index < 8; index++) {
-			nca.package = index;
-			ret = ncsi_xmit_cmd(&nca);
-			if (ret)
-				goto error;
-		}
-
-		nd->state = ncsi_dev_state_probe_channel;
-		break;
-	case ncsi_dev_state_probe_channel:
-		if (!ndp->active_package)
-			ndp->active_package = list_first_or_null_rcu(
-				&ndp->packages, struct ncsi_package, node);
-		else if (list_is_last(&ndp->active_package->node,
-				      &ndp->packages))
-			ndp->active_package = NULL;
-		else
-			ndp->active_package = list_next_entry(
-				ndp->active_package, node);
-
-		/* All available packages and channels are enumerated. The
-		 * enumeration happens for once when the NCSI interface is
-		 * started. So we need continue to start the interface after
-		 * the enumeration.
-		 *
-		 * We have to choose an active channel before configuring it.
-		 * Note that we possibly don't have active channel in extreme
-		 * situation.
-		 */
-		if (!ndp->active_package) {
-			ndp->flags |= NCSI_DEV_PROBED;
-			if (ncsi_check_hwa(ndp))
-				ncsi_enable_hwa(ndp);
-			else
-				ncsi_choose_active_channel(ndp);
-			return;
-		}
-
-		/* Select the active package */
 		ndp->pending_req_num = 1;
+
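+		/* Select only the package currently being probed */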
 		nca.type = NCSI_PKT_CMD_SP;
 		nca.bytes[0] = 1;
-		nca.package = ndp->active_package->id;
+		nca.package = ndp->package_probe_id;
 		nca.channel = NCSI_RESERVED_CHANNEL;
 		ret = ncsi_xmit_cmd(&nca);
 		if (ret)
 			goto error;
-
+		nd->state = ncsi_dev_state_probe_channel;
+		break;
+	case ncsi_dev_state_probe_channel:
+		ndp->active_package = ncsi_find_package(ndp,
+							ndp->package_probe_id);
+		if (!ndp->active_package) {
+			/* No response */
+			nd->state = ncsi_dev_state_probe_dp;
+			schedule_work(&ndp->work);
+			break;
+		}
 		nd->state = ncsi_dev_state_probe_cis;
+		schedule_work(&ndp->work);
 		break;
 	case ncsi_dev_state_probe_cis:
 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
@@ -1222,22 +1343,35 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 	case ncsi_dev_state_probe_dp:
 		ndp->pending_req_num = 1;
 
-		/* Deselect the active package */
+		/* Deselect the current package */
 		nca.type = NCSI_PKT_CMD_DP;
-		nca.package = ndp->active_package->id;
+		nca.package = ndp->package_probe_id;
 		nca.channel = NCSI_RESERVED_CHANNEL;
 		ret = ncsi_xmit_cmd(&nca);
 		if (ret)
 			goto error;
 
-		/* Scan channels in next package */
-		nd->state = ncsi_dev_state_probe_channel;
+		/* Probe next package */
+		ndp->package_probe_id++;
+		if (ndp->package_probe_id >= 8) {
+			/* Probe finished */
+			ndp->flags |= NCSI_DEV_PROBED;
+			break;
+		}
+		nd->state = ncsi_dev_state_probe_package;
+		ndp->active_package = NULL;
 		break;
 	default:
 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
 			    nd->state);
 	}
 
+	if (ndp->flags & NCSI_DEV_PROBED) {
+		/* Check if all packages have HWA support */
+		ncsi_check_hwa(ndp);
+		ncsi_choose_active_channel(ndp);
+	}
+
 	return;
 error:
 	netdev_err(ndp->ndev.dev,
@@ -1556,6 +1690,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
 	INIT_LIST_HEAD(&ndp->channel_queue);
 	INIT_LIST_HEAD(&ndp->vlan_vids);
 	INIT_WORK(&ndp->work, ncsi_dev_work);
+	ndp->package_whitelist = UINT_MAX;
 
 	/* Initialize private NCSI device */
 	spin_lock_init(&ndp->lock);
@@ -1592,26 +1727,19 @@ EXPORT_SYMBOL_GPL(ncsi_register_dev);
 int ncsi_start_dev(struct ncsi_dev *nd)
 {
 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
-	int ret;
 
 	if (nd->state != ncsi_dev_state_registered &&
 	    nd->state != ncsi_dev_state_functional)
 		return -ENOTTY;
 
 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
+		ndp->package_probe_id = 0;
 		nd->state = ncsi_dev_state_probe;
 		schedule_work(&ndp->work);
 		return 0;
 	}
 
-	if (ndp->flags & NCSI_DEV_HWA) {
-		netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
-		ret = ncsi_enable_hwa(ndp);
-	} else {
-		ret = ncsi_choose_active_channel(ndp);
-	}
-
-	return ret;
+	return ncsi_reset_dev(nd);
 }
 EXPORT_SYMBOL_GPL(ncsi_start_dev);
 
@@ -1624,7 +1752,10 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
 	int old_state;
 	unsigned long flags;
 
-	/* Stop the channel monitor and reset channel's state */
+	/* Stop the channel monitor on any active channels. Don't reset the
+	 * channel state so we know which were active when ncsi_start_dev()
+	 * is next called.
+	 */
 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 		NCSI_FOR_EACH_CHANNEL(np, nc) {
 			ncsi_stop_channel_monitor(nc);
@@ -1632,7 +1763,6 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
 			spin_lock_irqsave(&nc->lock, flags);
 			chained = !list_empty(&nc->link);
 			old_state = nc->state;
-			nc->state = NCSI_CHANNEL_INACTIVE;
 			spin_unlock_irqrestore(&nc->lock, flags);
 
 			WARN_ON_ONCE(chained ||
@@ -1645,6 +1775,92 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
 
+int ncsi_reset_dev(struct ncsi_dev *nd)
+{
+	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
+	struct ncsi_channel *nc, *active, *tmp;
+	struct ncsi_package *np;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ndp->lock, flags);
+
+	if (!(ndp->flags & NCSI_DEV_RESET)) {
+		/* Haven't been called yet, check states */
+		switch (nd->state & ncsi_dev_state_major) {
+		case ncsi_dev_state_registered:
+		case ncsi_dev_state_probe:
+			/* Not even probed yet - do nothing */
+			spin_unlock_irqrestore(&ndp->lock, flags);
+			return 0;
+		case ncsi_dev_state_suspend:
+		case ncsi_dev_state_config:
+			/* Wait for the channel to finish its suspend/config
+			 * operation; once it finishes it will check for
+			 * NCSI_DEV_RESET and reset the state.
+			 */
+			ndp->flags |= NCSI_DEV_RESET;
+			spin_unlock_irqrestore(&ndp->lock, flags);
+			return 0;
+		}
+	} else {
+		switch (nd->state) {
+		case ncsi_dev_state_suspend_done:
+		case ncsi_dev_state_config_done:
+		case ncsi_dev_state_functional:
+			/* Ok */
+			break;
+		default:
+			/* Current reset operation happening */
+			spin_unlock_irqrestore(&ndp->lock, flags);
+			return 0;
+		}
+	}
+
+	if (!list_empty(&ndp->channel_queue)) {
+		/* Clear any channel queue we may have interrupted */
+		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
+			list_del_init(&nc->link);
+	}
+	spin_unlock_irqrestore(&ndp->lock, flags);
+
+	active = NULL;
+	NCSI_FOR_EACH_PACKAGE(ndp, np) {
+		NCSI_FOR_EACH_CHANNEL(np, nc) {
+			spin_lock_irqsave(&nc->lock, flags);
+
+			if (nc->state == NCSI_CHANNEL_ACTIVE) {
+				active = nc;
+				nc->state = NCSI_CHANNEL_INVISIBLE;
+				spin_unlock_irqrestore(&nc->lock, flags);
+				ncsi_stop_channel_monitor(nc);
+				break;
+			}
+
+			spin_unlock_irqrestore(&nc->lock, flags);
+		}
+		if (active)
+			break;
+	}
+
+	if (!active) {
+		/* Done */
+		spin_lock_irqsave(&ndp->lock, flags);
+		ndp->flags &= ~NCSI_DEV_RESET;
+		spin_unlock_irqrestore(&ndp->lock, flags);
+		return ncsi_choose_active_channel(ndp);
+	}
+
+	spin_lock_irqsave(&ndp->lock, flags);
+	ndp->flags |= NCSI_DEV_RESET;
+	ndp->active_channel = active;
+	ndp->active_package = active->package;
+	spin_unlock_irqrestore(&ndp->lock, flags);
+
+	nd->state = ncsi_dev_state_suspend;
+	schedule_work(&ndp->work);
+	return 0;
+}
+
 void ncsi_unregister_dev(struct ncsi_dev *nd)
 {
 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 3331438..5d78244 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -30,6 +30,9 @@ static const struct nla_policy ncsi_genl_policy[NCSI_ATTR_MAX + 1] = {
 	[NCSI_ATTR_PACKAGE_ID] =	{ .type = NLA_U32 },
 	[NCSI_ATTR_CHANNEL_ID] =	{ .type = NLA_U32 },
 	[NCSI_ATTR_DATA] =		{ .type = NLA_BINARY, .len = 2048 },
+	[NCSI_ATTR_MULTI_FLAG] =	{ .type = NLA_FLAG },
+	[NCSI_ATTR_PACKAGE_MASK] =	{ .type = NLA_U32 },
+	[NCSI_ATTR_CHANNEL_MASK] =	{ .type = NLA_U32 },
 };
 
 static struct ncsi_dev_priv *ndp_from_ifindex(struct net *net, u32 ifindex)
@@ -69,7 +72,7 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_LINK_STATE, m->data[2]);
 	if (nc->state == NCSI_CHANNEL_ACTIVE)
 		nla_put_flag(skb, NCSI_CHANNEL_ATTR_ACTIVE);
-	if (ndp->force_channel == nc)
+	if (nc == nc->package->preferred_channel)
 		nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
 
 	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
@@ -114,7 +117,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
 		if (!pnest)
 			return -ENOMEM;
 		nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
-		if (ndp->force_package == np)
+		if ((0x1 << np->id) == ndp->package_whitelist)
 			nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
 		cnest = nla_nest_start(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
 		if (!cnest) {
@@ -290,49 +293,58 @@ static int ncsi_set_interface_nl(struct sk_buff *msg, struct genl_info *info)
 	package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
 	package = NULL;
 
-	spin_lock_irqsave(&ndp->lock, flags);
-
 	NCSI_FOR_EACH_PACKAGE(ndp, np)
 		if (np->id == package_id)
 			package = np;
 	if (!package) {
 		/* The user has set a package that does not exist */
-		spin_unlock_irqrestore(&ndp->lock, flags);
 		return -ERANGE;
 	}
 
 	channel = NULL;
-	if (!info->attrs[NCSI_ATTR_CHANNEL_ID]) {
-		/* Allow any channel */
-		channel_id = NCSI_RESERVED_CHANNEL;
-	} else {
+	if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
 		channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
 		NCSI_FOR_EACH_CHANNEL(package, nc)
-			if (nc->id == channel_id)
+			if (nc->id == channel_id) {
 				channel = nc;
+				break;
+			}
+		if (!channel) {
+			netdev_info(ndp->ndev.dev,
+				    "NCSI: Channel %u does not exist!\n",
+				    channel_id);
+			return -ERANGE;
+		}
 	}
 
-	if (channel_id != NCSI_RESERVED_CHANNEL && !channel) {
-		/* The user has set a channel that does not exist on this
-		 * package
-		 */
-		spin_unlock_irqrestore(&ndp->lock, flags);
-		netdev_info(ndp->ndev.dev, "NCSI: Channel %u does not exist!\n",
-			    channel_id);
-		return -ERANGE;
-	}
-
-	ndp->force_package = package;
-	ndp->force_channel = channel;
+	spin_lock_irqsave(&ndp->lock, flags);
+	ndp->package_whitelist = 0x1 << package->id;
+	ndp->multi_package = false;
 	spin_unlock_irqrestore(&ndp->lock, flags);
 
-	netdev_info(ndp->ndev.dev, "Set package 0x%x, channel 0x%x%s as preferred\n",
-		    package_id, channel_id,
-		    channel_id == NCSI_RESERVED_CHANNEL ? " (any)" : "");
+	spin_lock_irqsave(&package->lock, flags);
+	package->multi_channel = false;
+	if (channel) {
+		package->channel_whitelist = 0x1 << channel->id;
+		package->preferred_channel = channel;
+	} else {
+		/* Allow any channel */
+		package->channel_whitelist = UINT_MAX;
+		package->preferred_channel = NULL;
+	}
+	spin_unlock_irqrestore(&package->lock, flags);
 
-	/* Bounce the NCSI channel to set changes */
-	ncsi_stop_dev(&ndp->ndev);
-	ncsi_start_dev(&ndp->ndev);
+	if (channel)
+		netdev_info(ndp->ndev.dev,
+			    "Set package 0x%x, channel 0x%x as preferred\n",
+			    package_id, channel_id);
+	else
+		netdev_info(ndp->ndev.dev, "Set package 0x%x as preferred\n",
+			    package_id);
+
+	/* Update channel configuration */
+	if (!(ndp->flags & NCSI_DEV_RESET))
+		ncsi_reset_dev(&ndp->ndev);
 
 	return 0;
 }
@@ -340,6 +352,7 @@ static int ncsi_set_interface_nl(struct sk_buff *msg, struct genl_info *info)
 static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
 {
 	struct ncsi_dev_priv *ndp;
+	struct ncsi_package *np;
 	unsigned long flags;
 
 	if (!info || !info->attrs)
@@ -353,16 +366,24 @@ static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
 	if (!ndp)
 		return -ENODEV;
 
-	/* Clear any override */
+	/* Reset any whitelists and disable multi mode */
 	spin_lock_irqsave(&ndp->lock, flags);
-	ndp->force_package = NULL;
-	ndp->force_channel = NULL;
+	ndp->package_whitelist = UINT_MAX;
+	ndp->multi_package = false;
 	spin_unlock_irqrestore(&ndp->lock, flags);
+
+	NCSI_FOR_EACH_PACKAGE(ndp, np) {
+		spin_lock_irqsave(&np->lock, flags);
+		np->multi_channel = false;
+		np->channel_whitelist = UINT_MAX;
+		np->preferred_channel = NULL;
+		spin_unlock_irqrestore(&np->lock, flags);
+	}
 	netdev_info(ndp->ndev.dev, "NCSI: Cleared preferred package/channel\n");
 
-	/* Bounce the NCSI channel to set changes */
-	ncsi_stop_dev(&ndp->ndev);
-	ncsi_start_dev(&ndp->ndev);
+	/* Update channel configuration */
+	if (!(ndp->flags & NCSI_DEV_RESET))
+		ncsi_reset_dev(&ndp->ndev);
 
 	return 0;
 }
@@ -563,6 +584,138 @@ int ncsi_send_netlink_err(struct net_device *dev,
 	return nlmsg_unicast(net->genl_sock, skb, snd_portid);
 }
 
+static int ncsi_set_package_mask_nl(struct sk_buff *msg,
+				    struct genl_info *info)
+{
+	struct ncsi_dev_priv *ndp;
+	unsigned long flags;
+	int rc;
+
+	if (!info || !info->attrs)
+		return -EINVAL;
+
+	if (!info->attrs[NCSI_ATTR_IFINDEX])
+		return -EINVAL;
+
+	if (!info->attrs[NCSI_ATTR_PACKAGE_MASK])
+		return -EINVAL;
+
+	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+			       nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+	if (!ndp)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ndp->lock, flags);
+	if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
+		if (ndp->flags & NCSI_DEV_HWA) {
+			ndp->multi_package = true;
+			rc = 0;
+		} else {
+			netdev_err(ndp->ndev.dev,
+				   "NCSI: Can't use multiple packages without HWA\n");
+			rc = -EPERM;
+		}
+	} else {
+		ndp->multi_package = false;
+		rc = 0;
+	}
+
+	if (!rc)
+		ndp->package_whitelist =
+			nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_MASK]);
+	spin_unlock_irqrestore(&ndp->lock, flags);
+
+	if (!rc) {
+		/* Update channel configuration */
+		if (!(ndp->flags & NCSI_DEV_RESET))
+			ncsi_reset_dev(&ndp->ndev);
+	}
+
+	return rc;
+}
+
+static int ncsi_set_channel_mask_nl(struct sk_buff *msg,
+				    struct genl_info *info)
+{
+	struct ncsi_package *np, *package;
+	struct ncsi_channel *nc, *channel;
+	u32 package_id, channel_id;
+	struct ncsi_dev_priv *ndp;
+	unsigned long flags;
+
+	if (!info || !info->attrs)
+		return -EINVAL;
+
+	if (!info->attrs[NCSI_ATTR_IFINDEX])
+		return -EINVAL;
+
+	if (!info->attrs[NCSI_ATTR_PACKAGE_ID])
+		return -EINVAL;
+
+	if (!info->attrs[NCSI_ATTR_CHANNEL_MASK])
+		return -EINVAL;
+
+	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+			       nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+	if (!ndp)
+		return -ENODEV;
+
+	package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
+	package = NULL;
+	NCSI_FOR_EACH_PACKAGE(ndp, np)
+		if (np->id == package_id) {
+			package = np;
+			break;
+		}
+	if (!package)
+		return -ERANGE;
+
+	spin_lock_irqsave(&package->lock, flags);
+
+	channel = NULL;
+	if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
+		channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
+		NCSI_FOR_EACH_CHANNEL(np, nc)
+			if (nc->id == channel_id) {
+				channel = nc;
+				break;
+			}
+		if (!channel) {
+			spin_unlock_irqrestore(&package->lock, flags);
+			return -ERANGE;
+		}
+		netdev_dbg(ndp->ndev.dev,
+			   "NCSI: Channel %u set as preferred channel\n",
+			   channel->id);
+	}
+
+	package->channel_whitelist =
+		nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_MASK]);
+	if (package->channel_whitelist == 0)
+		netdev_dbg(ndp->ndev.dev,
+			   "NCSI: Package %u set to all channels disabled\n",
+			   package->id);
+
+	package->preferred_channel = channel;
+
+	if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
+		package->multi_channel = true;
+		netdev_info(ndp->ndev.dev,
+			    "NCSI: Multi-channel enabled on package %u\n",
+			    package_id);
+	} else {
+		package->multi_channel = false;
+	}
+
+	spin_unlock_irqrestore(&package->lock, flags);
+
+	/* Update channel configuration */
+	if (!(ndp->flags & NCSI_DEV_RESET))
+		ncsi_reset_dev(&ndp->ndev);
+
+	return 0;
+}
+
 static const struct genl_ops ncsi_ops[] = {
 	{
 		.cmd = NCSI_CMD_PKG_INFO,
@@ -589,6 +742,18 @@ static const struct genl_ops ncsi_ops[] = {
 		.doit = ncsi_send_cmd_nl,
 		.flags = GENL_ADMIN_PERM,
 	},
+	{
+		.cmd = NCSI_CMD_SET_PACKAGE_MASK,
+		.policy = ncsi_genl_policy,
+		.doit = ncsi_set_package_mask_nl,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = NCSI_CMD_SET_CHANNEL_MASK,
+		.policy = ncsi_genl_policy,
+		.doit = ncsi_set_channel_mask_nl,
+		.flags = GENL_ADMIN_PERM,
+	},
 };
 
 static struct genl_family ncsi_genl_family __ro_after_init = {
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 77e07ba..de7737a 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -256,7 +256,7 @@ static int ncsi_rsp_handler_dcnt(struct ncsi_request *nr)
 	if (!ncm->enable)
 		return 0;
 
-	ncm->enable = 1;
+	ncm->enable = 0;
 	return 0;
 }
 
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 43041f0..1ce30ef 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1148,8 +1148,9 @@ static int nfqa_parse_bridge(struct nf_queue_entry *entry,
 		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
 			return -EINVAL;
 
-		entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
-		entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
+		__vlan_hwaccel_put_tag(entry->skb,
+			nla_get_be16(tb[NFQA_VLAN_PROTO]),
+			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
 	}
 
 	if (nfqa[NFQA_L2HDR]) {
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 85ae53d..e47ebbb 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -301,7 +301,7 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
 		key->eth.vlan.tpid = vlan->vlan_tpid;
 	}
 	return skb_vlan_push(skb, vlan->vlan_tpid,
-			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
 }
 
 /* 'src' is already properly masked. */
@@ -822,8 +822,10 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
 	__skb_dst_copy(skb, data->dst);
 	*OVS_CB(skb) = data->cb;
 	skb->inner_protocol = data->inner_protocol;
-	skb->vlan_tci = data->vlan_tci;
-	skb->vlan_proto = data->vlan_proto;
+	if (data->vlan_tci & VLAN_CFI_MASK)
+		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
+	else
+		__vlan_hwaccel_clear_tag(skb);
 
 	/* Reconstruct the MAC header.  */
 	skb_push(skb, data->l2_len);
@@ -867,7 +869,10 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb,
 	data->cb = *OVS_CB(skb);
 	data->inner_protocol = skb->inner_protocol;
 	data->network_offset = orig_network_offset;
-	data->vlan_tci = skb->vlan_tci;
+	if (skb_vlan_tag_present(skb))
+		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
+	else
+		data->vlan_tci = 0;
 	data->vlan_proto = skb->vlan_proto;
 	data->mac_proto = mac_proto;
 	data->l2_len = hlen;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 35966da..57e0776 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -325,7 +325,7 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
 		return -ENOMEM;
 
 	vh = (struct vlan_head *)skb->data;
-	key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
+	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
 	key_vh->tpid = vh->tpid;
 
 	if (unlikely(untag_vlan)) {
@@ -358,7 +358,7 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 	int res;
 
 	if (skb_vlan_tag_present(skb)) {
-		key->eth.vlan.tci = htons(skb->vlan_tci);
+		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
 		key->eth.vlan.tpid = skb->vlan_proto;
 	} else {
 		/* Parse outer vlan tag in the non-accelerated case. */
@@ -597,7 +597,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 		 * skb_vlan_pop(), which will later shift the ethertype into
 		 * skb->protocol.
 		 */
-		if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
 			skb->protocol = key->eth.cvlan.tpid;
 		else
 			skb->protocol = key->eth.type;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index c670dd2..ba01fc4 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -60,7 +60,7 @@ struct ovs_tunnel_info {
 
 struct vlan_head {
 	__be16 tpid; /* Vlan type. Generally 802.1q or 802.1ad.*/
-	__be16 tci;  /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+	__be16 tci;  /* 0 if no VLAN, VLAN_CFI_MASK set otherwise. */
 };
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE			\
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 865ecef..435a4bd 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -990,9 +990,9 @@ static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
 	if (a[OVS_KEY_ATTR_VLAN])
 		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
 
-	if (!(tci & htons(VLAN_TAG_PRESENT))) {
+	if (!(tci & htons(VLAN_CFI_MASK))) {
 		if (tci) {
-			OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
+			OVS_NLERR(log, "%s TCI does not have VLAN_CFI_MASK bit set.",
 				  (inner) ? "C-VLAN" : "VLAN");
 			return -EINVAL;
 		} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
@@ -1013,9 +1013,9 @@ static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
 	__be16 tci = 0;
 	__be16 tpid = 0;
 	bool encap_valid = !!(match->key->eth.vlan.tci &
-			      htons(VLAN_TAG_PRESENT));
+			      htons(VLAN_CFI_MASK));
 	bool i_encap_valid = !!(match->key->eth.cvlan.tci &
-				htons(VLAN_TAG_PRESENT));
+				htons(VLAN_CFI_MASK));
 
 	if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
 		/* Not a VLAN. */
@@ -1039,8 +1039,8 @@ static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
 			  (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
 		return -EINVAL;
 	}
-	if (!(tci & htons(VLAN_TAG_PRESENT))) {
-		OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
+	if (!(tci & htons(VLAN_CFI_MASK))) {
+		OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_CFI_MASK bit.",
 			  (inner) ? "C-VLAN" : "VLAN");
 		return -EINVAL;
 	}
@@ -1095,7 +1095,7 @@ static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
 	if (err)
 		return err;
 
-	encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
+	encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_CFI_MASK));
 	if (encap_valid) {
 		err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
 						is_mask, log);
@@ -2943,7 +2943,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			vlan = nla_data(a);
 			if (!eth_type_vlan(vlan->vlan_tpid))
 				return -EINVAL;
-			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+			if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK)))
 				return -EINVAL;
 			vlan_tci = vlan->vlan_tci;
 			break;
@@ -2959,7 +2959,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			/* Prohibit push MPLS other than to a white list
 			 * for packets that have a known tag order.
 			 */
-			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			if (vlan_tci & htons(VLAN_CFI_MASK) ||
 			    (eth_type != htons(ETH_P_IP) &&
 			     eth_type != htons(ETH_P_IPV6) &&
 			     eth_type != htons(ETH_P_ARP) &&
@@ -2971,7 +2971,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 		}
 
 		case OVS_ACTION_ATTR_POP_MPLS:
-			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			if (vlan_tci & htons(VLAN_CFI_MASK) ||
 			    !eth_p_mpls(eth_type))
 				return -EINVAL;
 
@@ -3036,7 +3036,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 		case OVS_ACTION_ATTR_POP_ETH:
 			if (mac_proto != MAC_PROTO_ETHERNET)
 				return -EINVAL;
-			if (vlan_tci & htons(VLAN_TAG_PRESENT))
+			if (vlan_tci & htons(VLAN_CFI_MASK))
 				return -EINVAL;
 			mac_proto = MAC_PROTO_NONE;
 			break;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 2e5e7a4..9bec22e 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -84,7 +84,6 @@ static struct net_device *get_dpdev(const struct datapath *dp)
 	struct vport *local;
 
 	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
-	BUG_ON(!local);
 	return local->dev;
 }
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ba677d5..93fdaf7 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -63,7 +63,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
 		/* extract existing tag (and guarantee no hw-accel tag) */
 		if (skb_vlan_tag_present(skb)) {
 			tci = skb_vlan_tag_get(skb);
-			skb->vlan_tci = 0;
+			__vlan_hwaccel_clear_tag(skb);
 		} else {
 			/* in-payload vlan tag, pop it */
 			err = __skb_vlan_pop(skb, &tci);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f427a1e..d92f44a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -25,6 +25,7 @@
 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/rhashtable.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 	}
 }
 
+static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+{
+	const struct Qdisc_class_ops *cops;
+	struct Qdisc *qdisc;
+
+	if (!dev_ingress_queue(dev))
+		return NULL;
+
+	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
+	if (!qdisc)
+		return NULL;
+
+	cops = qdisc->ops->cl_ops;
+	if (!cops)
+		return NULL;
+
+	if (!cops->tcf_block)
+		return NULL;
+
+	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+}
+
+static struct rhashtable indr_setup_block_ht;
+
+struct tc_indr_block_dev {
+	struct rhash_head ht_node;
+	struct net_device *dev;
+	unsigned int refcnt;
+	struct list_head cb_list;
+	struct tcf_block *block;
+};
+
+struct tc_indr_block_cb {
+	struct list_head list;
+	void *cb_priv;
+	tc_indr_block_bind_cb_t *cb;
+	void *cb_ident;
+};
+
+static const struct rhashtable_params tc_indr_setup_block_ht_params = {
+	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
+	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
+	.key_len	= sizeof(struct net_device *),
+};
+
+static struct tc_indr_block_dev *
+tc_indr_block_dev_lookup(struct net_device *dev)
+{
+	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
+				      tc_indr_setup_block_ht_params);
+}
+
+static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
+{
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (indr_dev)
+		goto inc_ref;
+
+	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
+	if (!indr_dev)
+		return NULL;
+
+	INIT_LIST_HEAD(&indr_dev->cb_list);
+	indr_dev->dev = dev;
+	indr_dev->block = tc_dev_ingress_block(dev);
+	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+				   tc_indr_setup_block_ht_params)) {
+		kfree(indr_dev);
+		return NULL;
+	}
+
+inc_ref:
+	indr_dev->refcnt++;
+	return indr_dev;
+}
+
+static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
+{
+	if (--indr_dev->refcnt)
+		return;
+
+	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+			       tc_indr_setup_block_ht_params);
+	kfree(indr_dev);
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
+			tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		if (indr_block_cb->cb == cb &&
+		    indr_block_cb->cb_ident == cb_ident)
+			return indr_block_cb;
+	return NULL;
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
+		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (indr_block_cb)
+		return ERR_PTR(-EEXIST);
+
+	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
+	if (!indr_block_cb)
+		return ERR_PTR(-ENOMEM);
+
+	indr_block_cb->cb_priv = cb_priv;
+	indr_block_cb->cb = cb;
+	indr_block_cb->cb_ident = cb_ident;
+	list_add(&indr_block_cb->list, &indr_dev->cb_list);
+
+	return indr_block_cb;
+}
+
+static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
+{
+	list_del(&indr_block_cb->list);
+	kfree(indr_block_cb);
+}
+
+static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
+				  struct tc_indr_block_cb *indr_block_cb,
+				  enum tc_block_command command)
+{
+	struct tc_block_offload bo = {
+		.command	= command,
+		.binder_type	= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+		.block		= indr_dev->block,
+	};
+
+	if (!indr_dev->block)
+		return;
+
+	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+			  &bo);
+}
+
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	int err;
+
+	indr_dev = tc_indr_block_dev_get(dev);
+	if (!indr_dev)
+		return -ENOMEM;
+
+	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
+	err = PTR_ERR_OR_ZERO(indr_block_cb);
+	if (err)
+		goto err_dev_put;
+
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
+	return 0;
+
+err_dev_put:
+	tc_indr_block_dev_put(indr_dev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
+
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
+	rtnl_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
+
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (!indr_block_cb)
+		return;
+
+	/* Send unbind message if required to free any block cbs. */
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
+	tc_indr_block_cb_del(indr_block_cb);
+	tc_indr_block_dev_put(indr_dev);
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
+
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	rtnl_lock();
+	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
+
+static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
+			       struct tcf_block_ext_info *ei,
+			       enum tc_block_command command,
+			       struct netlink_ext_ack *extack)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	struct tc_block_offload bo = {
+		.command	= command,
+		.binder_type	= ei->binder_type,
+		.block		= block,
+		.extack		= extack,
+	};
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+				  &bo);
+}
+
 static bool tcf_block_offload_in_use(struct tcf_block *block)
 {
 	return block->offloadcnt;
@@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
 	if (err == -EOPNOTSUPP)
 		goto no_offload_dev_inc;
-	return err;
+	if (err)
+		return err;
+
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
+	return 0;
 
 no_offload_dev_inc:
 	if (tcf_block_offload_in_use(block))
 		return -EOPNOTSUPP;
 	block->nooffloaddevcnt++;
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
 	return 0;
 }
 
@@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 	struct net_device *dev = q->dev_queue->dev;
 	int err;
 
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
+
 	if (!dev->netdev_ops->ndo_setup_tc)
 		goto no_offload_dev_dec;
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
@@ -2355,6 +2602,11 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
+	err = rhashtable_init(&indr_setup_block_ht,
+			      &tc_indr_setup_block_ht_params);
+	if (err)
+		goto err_rhash_setup_block_ht;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
@@ -2366,6 +2618,8 @@ static int __init tc_filter_init(void)
 
 	return 0;
 
+err_rhash_setup_block_ht:
+	unregister_pernet_subsys(&tcf_net_ops);
 err_register_pernet_subsys:
 	destroy_workqueue(tc_filter_wq);
 	return err;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c6c3278..85e9f8e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -55,6 +55,8 @@ struct fl_flow_key {
 	struct flow_dissector_key_ip ip;
 	struct flow_dissector_key_ip enc_ip;
 	struct flow_dissector_key_enc_opts enc_opts;
+	struct flow_dissector_key_ports tp_min;
+	struct flow_dissector_key_ports tp_max;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
 
 struct fl_flow_mask_range {
@@ -65,6 +67,7 @@ struct fl_flow_mask_range {
 struct fl_flow_mask {
 	struct fl_flow_key key;
 	struct fl_flow_mask_range range;
+	u32 flags;
 	struct rhash_head ht_node;
 	struct rhashtable ht;
 	struct rhashtable_params filter_ht_params;
@@ -179,13 +182,89 @@ static void fl_clear_masked_range(struct fl_flow_key *key,
 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 }
 
-static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
-				       struct fl_flow_key *mkey)
+static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	__be16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = htons(filter->mask->key.tp_min.dst);
+	max_mask = htons(filter->mask->key.tp_max.dst);
+	min_val = htons(filter->key.tp_min.dst);
+	max_val = htons(filter->key.tp_max.dst);
+
+	if (min_mask && max_mask) {
+		if (htons(key->tp.dst) < min_val ||
+		    htons(key->tp.dst) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_min.dst = filter->mkey.tp_min.dst;
+		mkey->tp_max.dst = filter->mkey.tp_max.dst;
+	}
+	return true;
+}
+
+static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mkey)
+{
+	__be16 min_mask, max_mask, min_val, max_val;
+
+	min_mask = htons(filter->mask->key.tp_min.src);
+	max_mask = htons(filter->mask->key.tp_max.src);
+	min_val = htons(filter->key.tp_min.src);
+	max_val = htons(filter->key.tp_max.src);
+
+	if (min_mask && max_mask) {
+		if (htons(key->tp.src) < min_val ||
+		    htons(key->tp.src) > max_val)
+			return false;
+
+		/* skb does not have min and max values */
+		mkey->tp_min.src = filter->mkey.tp_min.src;
+		mkey->tp_max.src = filter->mkey.tp_max.src;
+	}
+	return true;
+}
+
+static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
+					 struct fl_flow_key *mkey)
 {
 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
 				      mask->filter_ht_params);
 }
 
+static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
+					     struct fl_flow_key *mkey,
+					     struct fl_flow_key *key)
+{
+	struct cls_fl_filter *filter, *f;
+
+	list_for_each_entry_rcu(filter, &mask->filters, list) {
+		if (!fl_range_port_dst_cmp(filter, key, mkey))
+			continue;
+
+		if (!fl_range_port_src_cmp(filter, key, mkey))
+			continue;
+
+		f = __fl_lookup(mask, mkey);
+		if (f)
+			return f;
+	}
+	return NULL;
+}
+
+static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
+				       struct fl_flow_key *mkey,
+				       struct fl_flow_key *key)
+{
+	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
+		return fl_lookup_range(mask, mkey, key);
+
+	return __fl_lookup(mask, mkey);
+}
+
 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		       struct tcf_result *res)
 {
@@ -208,7 +287,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
 		fl_set_masked_key(&skb_mkey, &skb_key, mask);
 
-		f = fl_lookup(mask, &skb_mkey);
+		f = fl_lookup(mask, &skb_mkey, &skb_key);
 		if (f && !tc_skip_sw(f->flags)) {
 			*res = f->res;
 			return tcf_exts_exec(skb, &f->exts, res);
@@ -514,6 +593,31 @@ static void fl_set_key_val(struct nlattr **tb,
 		memcpy(mask, nla_data(tb[mask_type]), len);
 }
 
+static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+				 struct fl_flow_key *mask)
+{
+	fl_set_key_val(tb, &key->tp_min.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
+	fl_set_key_val(tb, &key->tp_max.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
+	fl_set_key_val(tb, &key->tp_min.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
+	fl_set_key_val(tb, &key->tp_max.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
+
+	if ((mask->tp_min.dst && mask->tp_max.dst &&
+	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
+	     (mask->tp_min.src && mask->tp_max.src &&
+	      htons(key->tp_max.src) <= htons(key->tp_min.src)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int fl_set_key_mpls(struct nlattr **tb,
 			   struct flow_dissector_key_mpls *key_val,
 			   struct flow_dissector_key_mpls *key_mask)
@@ -921,6 +1025,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 			       sizeof(key->arp.tha));
 	}
 
+	if (key->basic.ip_proto == IPPROTO_TCP ||
+	    key->basic.ip_proto == IPPROTO_UDP ||
+	    key->basic.ip_proto == IPPROTO_SCTP) {
+		ret = fl_set_key_port_range(tb, key, mask);
+		if (ret)
+			return ret;
+	}
+
 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -1038,8 +1150,9 @@ static void fl_init_dissector(struct flow_dissector *dissector,
 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
-	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
-			     FLOW_DISSECTOR_KEY_PORTS, tp);
+	if (FL_KEY_IS_MASKED(mask, tp) ||
+	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
+		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_IP, ip);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1086,6 +1199,10 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
 
 	fl_mask_copy(newmask, mask);
 
+	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
+	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
+		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
+
 	err = fl_init_mask_hashtable(newmask);
 	if (err)
 		goto errout_free;
@@ -1239,7 +1356,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		goto errout_idr;
 
 	if (!tc_skip_sw(fnew->flags)) {
-		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
+		if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
 			err = -EEXIST;
 			goto errout_mask;
 		}
@@ -1476,6 +1593,26 @@ static int fl_dump_key_val(struct sk_buff *skb,
 	return 0;
 }
 
+static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
+				  struct fl_flow_key *mask)
+{
+	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
+			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_min.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
+			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_max.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
+			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_min.src)) ||
+	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
+			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_max.src)))
+		return -1;
+
+	return 0;
+}
+
 static int fl_dump_key_mpls(struct sk_buff *skb,
 			    struct flow_dissector_key_mpls *mpls_key,
 			    struct flow_dissector_key_mpls *mpls_mask)
@@ -1812,6 +1949,12 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 				  sizeof(key->arp.tha))))
 		goto nla_put_failure;
 
+	if ((key->basic.ip_proto == IPPROTO_TCP ||
+	     key->basic.ip_proto == IPPROTO_UDP ||
+	     key->basic.ip_proto == IPPROTO_SCTP) &&
+	     fl_dump_key_port_range(skb, key, mask))
+		goto nla_put_failure;
+
 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4b28fd4..4c54bc4 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -558,6 +558,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	cls_u32.knode.mask = 0;
 #endif
 	cls_u32.knode.sel = &n->sel;
+	cls_u32.knode.res = &n->res;
 	cls_u32.knode.exts = &n->exts;
 	if (n->ht_down)
 		cls_u32.knode.link_handle = ht->handle;
@@ -1206,6 +1207,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 		cls_u32.knode.mask = 0;
 #endif
 		cls_u32.knode.sel = &n->sel;
+		cls_u32.knode.res = &n->res;
 		cls_u32.knode.exts = &n->exts;
 		if (n->ht_down)
 			cls_u32.knode.link_handle = ht->handle;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ca3b0f4..9c88cec 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -810,6 +810,71 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
 }
 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
 
+int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
+			      void *type_data)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	int err;
+
+	sch->flags &= ~TCQ_F_OFFLOADED;
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return 0;
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
+	if (err == -EOPNOTSUPP)
+		return 0;
+
+	if (!err)
+		sch->flags |= TCQ_F_OFFLOADED;
+
+	return err;
+}
+EXPORT_SYMBOL(qdisc_offload_dump_helper);
+
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+				struct Qdisc *new, struct Qdisc *old,
+				enum tc_setup_type type, void *type_data,
+				struct netlink_ext_ack *extack)
+{
+	bool any_qdisc_is_offloaded;
+	int err;
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return;
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
+
+	/* Don't report error if the graft is part of destroy operation. */
+	if (!err || !new || new == &noop_qdisc)
+		return;
+
+	/* Don't report error if the parent, the old child and the new
+	 * one are not offloaded.
+	 */
+	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
+	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
+	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
+
+	if (any_qdisc_is_offloaded)
+		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
+}
+EXPORT_SYMBOL(qdisc_offload_graft_helper);
+
+static void qdisc_offload_graft_root(struct net_device *dev,
+				     struct Qdisc *new, struct Qdisc *old,
+				     struct netlink_ext_ack *extack)
+{
+	struct tc_root_qopt_offload graft_offload = {
+		.command	= TC_ROOT_GRAFT,
+		.handle		= new ? new->handle : 0,
+		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
+				  (old && old->flags & TCQ_F_INGRESS),
+	};
+
+	qdisc_offload_graft_helper(dev, NULL, new, old,
+				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
+}
+
 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 			 u32 portid, u32 seq, u16 flags, int event)
 {
@@ -957,7 +1022,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 {
 	struct Qdisc *q = old;
 	struct net *net = dev_net(dev);
-	int err = 0;
 
 	if (parent == NULL) {
 		unsigned int i, num_q, ingress;
@@ -977,6 +1041,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (dev->flags & IFF_UP)
 			dev_deactivate(dev);
 
+		qdisc_offload_graft_root(dev, new, old, extack);
+
 		if (new && new->ops->attach)
 			goto skip;
 
@@ -1012,28 +1078,29 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 			dev_activate(dev);
 	} else {
 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
+		unsigned long cl;
+		int err;
 
 		/* Only support running class lockless if parent is lockless */
 		if (new && (new->flags & TCQ_F_NOLOCK) &&
 		    parent && !(parent->flags & TCQ_F_NOLOCK))
 			new->flags &= ~TCQ_F_NOLOCK;
 
-		err = -EOPNOTSUPP;
-		if (cops && cops->graft) {
-			unsigned long cl = cops->find(parent, classid);
+		if (!cops || !cops->graft)
+			return -EOPNOTSUPP;
 
-			if (cl) {
-				err = cops->graft(parent, cl, new, &old,
-						  extack);
-			} else {
-				NL_SET_ERR_MSG(extack, "Specified class not found");
-				err = -ENOENT;
-			}
+		cl = cops->find(parent, classid);
+		if (!cl) {
+			NL_SET_ERR_MSG(extack, "Specified class not found");
+			return -ENOENT;
 		}
-		if (!err)
-			notify_and_destroy(net, skb, n, classid, old, new);
+
+		err = cops->graft(parent, cl, new, &old, extack);
+		if (err)
+			return err;
+		notify_and_destroy(net, skb, n, classid, old, new);
 	}
-	return err;
+	return 0;
 }
 
 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 1538d6f..1150f22 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -30,7 +30,7 @@ struct etf_sched_data {
 	int queue;
 	s32 delta; /* in ns */
 	ktime_t last; /* The txtime of the last skb sent to the netdevice. */
-	struct rb_root head;
+	struct rb_root_cached head;
 	struct qdisc_watchdog watchdog;
 	ktime_t (*get_time)(void);
 };
@@ -104,7 +104,7 @@ static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
 	struct etf_sched_data *q = qdisc_priv(sch);
 	struct rb_node *p;
 
-	p = rb_first(&q->head);
+	p = rb_first_cached(&q->head);
 	if (!p)
 		return NULL;
 
@@ -117,8 +117,10 @@ static void reset_watchdog(struct Qdisc *sch)
 	struct sk_buff *skb = etf_peek_timesortedlist(sch);
 	ktime_t next;
 
-	if (!skb)
+	if (!skb) {
+		qdisc_watchdog_cancel(&q->watchdog);
 		return;
+	}
 
 	next = ktime_sub_ns(skb->tstamp, q->delta);
 	qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
@@ -154,8 +156,9 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
 				      struct sk_buff **to_free)
 {
 	struct etf_sched_data *q = qdisc_priv(sch);
-	struct rb_node **p = &q->head.rb_node, *parent = NULL;
+	struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
 	ktime_t txtime = nskb->tstamp;
+	bool leftmost = true;
 
 	if (!is_packet_valid(sch, nskb)) {
 		report_sock_error(nskb, EINVAL,
@@ -168,13 +171,15 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
 
 		parent = *p;
 		skb = rb_to_skb(parent);
-		if (ktime_after(txtime, skb->tstamp))
+		if (ktime_after(txtime, skb->tstamp)) {
 			p = &parent->rb_right;
-		else
+			leftmost = false;
+		} else {
 			p = &parent->rb_left;
+		}
 	}
 	rb_link_node(&nskb->rbnode, parent, p);
-	rb_insert_color(&nskb->rbnode, &q->head);
+	rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
 
 	qdisc_qstats_backlog_inc(sch, nskb);
 	sch->q.qlen++;
@@ -185,12 +190,42 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
 	return NET_XMIT_SUCCESS;
 }
 
-static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
-				 bool drop)
+static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
+				ktime_t now)
+{
+	struct etf_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *to_free = NULL;
+	struct sk_buff *tmp = NULL;
+
+	skb_rbtree_walk_from_safe(skb, tmp) {
+		if (ktime_after(skb->tstamp, now))
+			break;
+
+		rb_erase_cached(&skb->rbnode, &q->head);
+
+		/* The rbnode field in the skb re-uses these fields, now that
+		 * we are done with the rbnode, reset them.
+		 */
+		skb->next = NULL;
+		skb->prev = NULL;
+		skb->dev = qdisc_dev(sch);
+
+		report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+		qdisc_qstats_backlog_dec(sch, skb);
+		qdisc_drop(skb, sch, &to_free);
+		qdisc_qstats_overlimit(sch);
+		sch->q.qlen--;
+	}
+
+	kfree_skb_list(to_free);
+}
+
+static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct etf_sched_data *q = qdisc_priv(sch);
 
-	rb_erase(&skb->rbnode, &q->head);
+	rb_erase_cached(&skb->rbnode, &q->head);
 
 	/* The rbnode field in the skb re-uses these fields, now that
 	 * we are done with the rbnode, reset them.
@@ -201,19 +236,9 @@ static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
 
 	qdisc_qstats_backlog_dec(sch, skb);
 
-	if (drop) {
-		struct sk_buff *to_free = NULL;
+	qdisc_bstats_update(sch, skb);
 
-		report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
-
-		qdisc_drop(skb, sch, &to_free);
-		kfree_skb_list(to_free);
-		qdisc_qstats_overlimit(sch);
-	} else {
-		qdisc_bstats_update(sch, skb);
-
-		q->last = skb->tstamp;
-	}
+	q->last = skb->tstamp;
 
 	sch->q.qlen--;
 }
@@ -232,7 +257,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
 
 	/* Drop if packet has expired while in queue. */
 	if (ktime_before(skb->tstamp, now)) {
-		timesortedlist_erase(sch, skb, true);
+		timesortedlist_drop(sch, skb, now);
 		skb = NULL;
 		goto out;
 	}
@@ -241,7 +266,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
 	 * txtime from deadline to (now + delta).
 	 */
 	if (q->deadline_mode) {
-		timesortedlist_erase(sch, skb, false);
+		timesortedlist_remove(sch, skb);
 		skb->tstamp = now;
 		goto out;
 	}
@@ -250,7 +275,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
 
 	/* Dequeue only if now is within the [txtime - delta, txtime] range. */
 	if (ktime_after(now, next))
-		timesortedlist_erase(sch, skb, false);
+		timesortedlist_remove(sch, skb);
 	else
 		skb = NULL;
 
@@ -386,14 +411,14 @@ static int etf_init(struct Qdisc *sch, struct nlattr *opt,
 static void timesortedlist_clear(struct Qdisc *sch)
 {
 	struct etf_sched_data *q = qdisc_priv(sch);
-	struct rb_node *p = rb_first(&q->head);
+	struct rb_node *p = rb_first_cached(&q->head);
 
 	while (p) {
 		struct sk_buff *skb = rb_to_skb(p);
 
 		p = rb_next(p);
 
-		rb_erase(&skb->rbnode, &q->head);
+		rb_erase_cached(&skb->rbnode, &q->head);
 		rtnl_kfree_skbs(skb, skb);
 		sch->q.qlen--;
 	}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 25a7cf6..1a662f2 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -94,6 +94,7 @@ struct fq_sched_data {
 	u32		flow_refill_delay;
 	u32		flow_plimit;	/* max packets per flow */
 	unsigned long	flow_max_rate;	/* optional max rate per flow */
+	u64		ce_threshold;
 	u32		orphan_mask;	/* mask for orphaned skb */
 	u32		low_rate_threshold;
 	struct rb_root	*fq_root;
@@ -107,6 +108,7 @@ struct fq_sched_data {
 	u64		stat_gc_flows;
 	u64		stat_internal_packets;
 	u64		stat_throttled;
+	u64		stat_ce_mark;
 	u64		stat_flows_plimit;
 	u64		stat_pkts_too_long;
 	u64		stat_allocation_errors;
@@ -412,16 +414,21 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_get_ns();
 	struct fq_flow_head *head;
 	struct sk_buff *skb;
 	struct fq_flow *f;
 	unsigned long rate;
 	u32 plen;
+	u64 now;
+
+	if (!sch->q.qlen)
+		return NULL;
 
 	skb = fq_dequeue_head(sch, &q->internal);
 	if (skb)
 		goto out;
+
+	now = ktime_get_ns();
 	fq_check_throttled(q, now);
 begin:
 	head = &q->new_flows;
@@ -454,6 +461,11 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 			fq_flow_set_throttled(q, f);
 			goto begin;
 		}
+		if (time_next_packet &&
+		    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+			INET_ECN_set_ce(skb);
+			q->stat_ce_mark++;
+		}
 	}
 
 	skb = fq_dequeue_head(sch, f);
@@ -657,6 +669,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
 	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
+	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
 };
 
 static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@ -736,6 +749,10 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_ORPHAN_MASK])
 		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
 
+	if (tb[TCA_FQ_CE_THRESHOLD])
+		q->ce_threshold = (u64)NSEC_PER_USEC *
+				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+
 	if (!err) {
 		sch_tree_unlock(sch);
 		err = fq_resize(sch, fq_log);
@@ -786,6 +803,10 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->fq_trees_log		= ilog2(1024);
 	q->orphan_mask		= 1024 - 1;
 	q->low_rate_threshold	= 550000 / 8;
+
+	/* Default ce_threshold of 4294 seconds */
+	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;
+
 	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
 
 	if (opt)
@@ -799,6 +820,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
+	u64 ce_threshold = q->ce_threshold;
 	struct nlattr *opts;
 
 	opts = nla_nest_start(skb, TCA_OPTIONS);
@@ -807,6 +829,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
 
+	do_div(ce_threshold, NSEC_PER_USEC);
+
 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
@@ -819,6 +843,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
 	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
 			q->low_rate_threshold) ||
+	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
 		goto nla_put_failure;
 
@@ -848,6 +873,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	st.throttled_flows	  = q->throttled_flows;
 	st.unthrottle_latency_ns  = min_t(unsigned long,
 					  q->unthrottle_latency_ns, ~0U);
+	st.ce_mark		  = q->stat_ce_mark;
 	sch_tree_unlock(sch);
 
 	return gnet_stats_copy_app(d, &st, sizeof(st));
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 4a042ab..234afbf 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -23,19 +23,23 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
+#include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/red.h>
 
 #define GRED_DEF_PRIO (MAX_DPs / 2)
 #define GRED_VQ_MASK (MAX_DPs - 1)
 
+#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
+
 struct gred_sched_data;
 struct gred_sched;
 
 struct gred_sched_data {
 	u32		limit;		/* HARD maximal queue length	*/
 	u32		DP;		/* the drop parameters */
-	u32		bytesin;	/* bytes seen on virtualQ so far*/
+	u32		red_flags;	/* virtualQ version of red_flags */
+	u64		bytesin;	/* bytes seen on virtualQ so far*/
 	u32		packetsin;	/* packets seen on virtualQ so far*/
 	u32		backlog;	/* bytes on the virtualQ */
 	u8		prio;		/* the prio of this vq */
@@ -139,14 +143,27 @@ static inline void gred_store_wred_set(struct gred_sched *table,
 	table->wred_set.qidlestart = q->vars.qidlestart;
 }
 
-static inline int gred_use_ecn(struct gred_sched *t)
+static int gred_use_ecn(struct gred_sched_data *q)
 {
-	return t->red_flags & TC_RED_ECN;
+	return q->red_flags & TC_RED_ECN;
 }
 
-static inline int gred_use_harddrop(struct gred_sched *t)
+static int gred_use_harddrop(struct gred_sched_data *q)
 {
-	return t->red_flags & TC_RED_HARDDROP;
+	return q->red_flags & TC_RED_HARDDROP;
+}
+
+static bool gred_per_vq_red_flags_used(struct gred_sched *table)
+{
+	unsigned int i;
+
+	/* Local per-vq flags couldn't have been set unless global are 0 */
+	if (table->red_flags)
+		return false;
+	for (i = 0; i < MAX_DPs; i++)
+		if (table->tab[i] && table->tab[i]->red_flags)
+			return true;
+	return false;
 }
 
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -212,7 +229,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	case RED_PROB_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
 		}
@@ -222,7 +239,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	case RED_HARD_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
 		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
@@ -295,15 +312,103 @@ static void gred_reset(struct Qdisc *sch)
 	}
 }
 
+static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct tc_gred_qopt_offload opt = {
+		.command	= command,
+		.handle		= sch->handle,
+		.parent		= sch->parent,
+	};
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return;
+
+	if (command == TC_GRED_REPLACE) {
+		unsigned int i;
+
+		opt.set.grio_on = gred_rio_mode(table);
+		opt.set.wred_on = gred_wred_mode(table);
+		opt.set.dp_cnt = table->DPs;
+		opt.set.dp_def = table->def;
+
+		for (i = 0; i < table->DPs; i++) {
+			struct gred_sched_data *q = table->tab[i];
+
+			if (!q)
+				continue;
+			opt.set.tab[i].present = true;
+			opt.set.tab[i].limit = q->limit;
+			opt.set.tab[i].prio = q->prio;
+			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+			opt.set.tab[i].is_ecn = gred_use_ecn(q);
+			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
+			opt.set.tab[i].probability = q->parms.max_P;
+			opt.set.tab[i].backlog = &q->backlog;
+		}
+		opt.set.qstats = &sch->qstats;
+	}
+
+	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+}
+
+static int gred_offload_dump_stats(struct Qdisc *sch)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct tc_gred_qopt_offload *hw_stats;
+	unsigned int i;
+	int ret;
+
+	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
+	if (!hw_stats)
+		return -ENOMEM;
+
+	hw_stats->command = TC_GRED_STATS;
+	hw_stats->handle = sch->handle;
+	hw_stats->parent = sch->parent;
+
+	for (i = 0; i < MAX_DPs; i++)
+		if (table->tab[i])
+			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
+
+	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
+	/* Even if driver returns failure adjust the stats - in case offload
+	 * ended but driver still wants to adjust the values.
+	 */
+	for (i = 0; i < MAX_DPs; i++) {
+		if (!table->tab[i])
+			continue;
+		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
+		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
+
+		_bstats_update(&sch->bstats,
+			       hw_stats->stats.bstats[i].bytes,
+			       hw_stats->stats.bstats[i].packets);
+		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
+		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
+		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
+		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
+		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
+	}
+
+	kfree(hw_stats);
+	return ret;
+}
+
 static inline void gred_destroy_vq(struct gred_sched_data *q)
 {
 	kfree(q);
 }
 
-static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
+static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
+				 struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_sopt *sopt;
+	bool red_flags_changed;
 	int i;
 
 	if (!dps)
@@ -311,13 +416,28 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 
 	sopt = nla_data(dps);
 
-	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
-	    sopt->def_DP >= sopt->DPs)
+	if (sopt->DPs > MAX_DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
 		return -EINVAL;
+	}
+	if (sopt->DPs == 0) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "number of virtual queues can't be 0");
+		return -EINVAL;
+	}
+	if (sopt->def_DP >= sopt->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
+		return -EINVAL;
+	}
+	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
+		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
+		return -EINVAL;
+	}
 
 	sch_tree_lock(sch);
 	table->DPs = sopt->DPs;
 	table->def = sopt->def_DP;
+	red_flags_changed = table->red_flags != sopt->flags;
 	table->red_flags = sopt->flags;
 
 	/*
@@ -337,6 +457,12 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 		gred_disable_wred_mode(table);
 	}
 
+	if (red_flags_changed)
+		for (i = 0; i < table->DPs; i++)
+			if (table->tab[i])
+				table->tab[i]->red_flags =
+					table->red_flags & GRED_VQ_RED_FLAGS;
+
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
 			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
@@ -346,25 +472,30 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 		}
 	}
 
+	gred_offload(sch, TC_GRED_REPLACE);
 	return 0;
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
 				 struct tc_gred_qopt *ctl, int prio,
 				 u8 *stab, u32 max_P,
-				 struct gred_sched_data **prealloc)
+				 struct gred_sched_data **prealloc,
+				 struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct gred_sched_data *q = table->tab[dp];
 
-	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
 		return -EINVAL;
+	}
 
 	if (!q) {
 		table->tab[dp] = q = *prealloc;
 		*prealloc = NULL;
 		if (!q)
 			return -ENOMEM;
+		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
 	}
 
 	q->DP = dp;
@@ -384,14 +515,127 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	return 0;
 }
 
+static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
+	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
+	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
+};
+
+static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
+	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
+};
+
 static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
 	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
 	[TCA_GRED_STAB]		= { .len = 256 },
 	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
 	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
 	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
+	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
 };
 
+static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
+{
+	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+	u32 dp;
+
+	nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy, NULL);
+
+	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+
+	if (tb[TCA_GRED_VQ_FLAGS])
+		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+}
+
+static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
+{
+	const struct nlattr *attr;
+	int rem;
+
+	nla_for_each_nested(attr, vqs, rem) {
+		switch (nla_type(attr)) {
+		case TCA_GRED_VQ_ENTRY:
+			gred_vq_apply(table, attr);
+			break;
+		}
+	}
+}
+
+static int gred_vq_validate(struct gred_sched *table, u32 cdp,
+			    const struct nlattr *entry,
+			    struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+	int err;
+	u32 dp;
+
+	err = nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy,
+			       extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_GRED_VQ_DP]) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
+		return -EINVAL;
+	}
+	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+	if (dp >= table->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
+		return -EINVAL;
+	}
+	if (dp != cdp && !table->tab[dp]) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_GRED_VQ_FLAGS]) {
+		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+
+		if (table->red_flags && table->red_flags != red_flags) {
+			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
+			return -EINVAL;
+		}
+		if (red_flags & ~GRED_VQ_RED_FLAGS) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "invalid RED flags specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
+			     struct nlattr *vqs, struct netlink_ext_ack *extack)
+{
+	const struct nlattr *attr;
+	int rem, err;
+
+	err = nla_validate_nested(vqs, TCA_GRED_VQ_ENTRY_MAX,
+				  gred_vqe_policy, extack);
+	if (err < 0)
+		return err;
+
+	nla_for_each_nested(attr, vqs, rem) {
+		switch (nla_type(attr)) {
+		case TCA_GRED_VQ_ENTRY:
+			err = gred_vq_validate(table, cdp, attr, extack);
+			if (err)
+				return err;
+			break;
+		default:
+			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
+			return -EINVAL;
+		}
+	}
+
+	if (rem > 0) {
+		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 		       struct netlink_ext_ack *extack)
 {
@@ -406,29 +650,39 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 	if (opt == NULL)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
 	if (err < 0)
 		return err;
 
 	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
 		if (tb[TCA_GRED_LIMIT] != NULL)
 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 	}
 
 	if (tb[TCA_GRED_PARMS] == NULL ||
 	    tb[TCA_GRED_STAB] == NULL ||
-	    tb[TCA_GRED_LIMIT] != NULL)
+	    tb[TCA_GRED_LIMIT] != NULL) {
+		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
 		return -EINVAL;
+	}
 
 	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
 
-	err = -EINVAL;
 	ctl = nla_data(tb[TCA_GRED_PARMS]);
 	stab = nla_data(tb[TCA_GRED_STAB]);
 
-	if (ctl->DP >= table->DPs)
-		goto errout;
+	if (ctl->DP >= table->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_GRED_VQ_LIST]) {
+		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
+					extack);
+		if (err)
+			return err;
+	}
 
 	if (gred_rio_mode(table)) {
 		if (ctl->prio == 0) {
@@ -448,9 +702,13 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 	sch_tree_lock(sch);
 
-	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
+	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
+			     extack);
 	if (err < 0)
-		goto errout_locked;
+		goto err_unlock_free;
+
+	if (tb[TCA_GRED_VQ_LIST])
+		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
 
 	if (gred_rio_mode(table)) {
 		gred_disable_wred_mode(table);
@@ -458,12 +716,15 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 			gred_enable_wred_mode(table);
 	}
 
-	err = 0;
-
-errout_locked:
 	sch_tree_unlock(sch);
 	kfree(prealloc);
-errout:
+
+	gred_offload(sch, TC_GRED_REPLACE);
+	return 0;
+
+err_unlock_free:
+	sch_tree_unlock(sch);
+	kfree(prealloc);
 	return err;
 }
 
@@ -476,12 +737,15 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 	if (!opt)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
+	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "virtual queue configuration can't be specified at initialization time");
 		return -EINVAL;
+	}
 
 	if (tb[TCA_GRED_LIMIT])
 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
@@ -489,13 +753,13 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 		sch->limit = qdisc_dev(sch)->tx_queue_len
 		             * psched_mtu(qdisc_dev(sch));
 
-	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 }
 
 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct nlattr *parms, *opts = NULL;
+	struct nlattr *parms, *vqs, *opts = NULL;
 	int i;
 	u32 max_p[MAX_DPs];
 	struct tc_gred_sopt sopt = {
@@ -505,6 +769,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		.flags	= table->red_flags,
 	};
 
+	if (gred_offload_dump_stats(sch))
+		goto nla_put_failure;
+
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
@@ -522,6 +789,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
 		goto nla_put_failure;
 
+	/* Old style all-in-one dump of VQs */
 	parms = nla_nest_start(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
 		goto nla_put_failure;
@@ -572,6 +840,58 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	nla_nest_end(skb, parms);
 
+	/* Dump the VQs again, in a more structured way */
+	vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
+	if (!vqs)
+		goto nla_put_failure;
+
+	for (i = 0; i < MAX_DPs; i++) {
+		struct gred_sched_data *q = table->tab[i];
+		struct nlattr *vq;
+
+		if (!q)
+			continue;
+
+		vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
+		if (!vq)
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
+			goto nla_put_failure;
+
+		/* Stats */
+		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
+				      TCA_GRED_VQ_PAD))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
+				gred_backlog(table, q, sch)))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
+				q->stats.prob_drop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
+				q->stats.prob_mark))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
+				q->stats.forced_drop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
+				q->stats.forced_mark))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, vq);
+	}
+	nla_nest_end(skb, vqs);
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -588,6 +908,7 @@ static void gred_destroy(struct Qdisc *sch)
 		if (table->tab[i])
 			gred_destroy_vq(table->tab[i]);
 	}
+	gred_offload(sch, TC_GRED_DESTROY);
 }
 
 static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
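
The GRED offload hooks above hand a struct tc_gred_qopt_offload to the driver through
ndo_setup_tc() with TC_SETUP_QDISC_GRED. As a rough illustration of the driver side, the
sketch below shows how a callback might consume the TC_GRED_REPLACE / TC_GRED_DESTROY /
TC_GRED_STATS commands; the my_drv_*() helpers are hypothetical placeholders for the
hardware programming and are not part of this series.

static int my_drv_setup_tc_gred(struct net_device *dev,
				struct tc_gred_qopt_offload *opt)
{
	unsigned int i;

	switch (opt->command) {
	case TC_GRED_REPLACE:
		/* opt->set carries the whole table: per-Qdisc mode bits and
		 * one entry per instantiated virtual queue.
		 */
		if (opt->set.grio_on || opt->set.wred_on)
			return -EOPNOTSUPP;
		for (i = 0; i < opt->set.dp_cnt; i++) {
			if (!opt->set.tab[i].present)
				continue;
			my_drv_program_vq(dev, i,	/* hypothetical */
					  opt->set.tab[i].min,
					  opt->set.tab[i].max,
					  opt->set.tab[i].is_ecn,
					  opt->set.tab[i].is_harddrop);
		}
		return 0;
	case TC_GRED_DESTROY:
		my_drv_disable_gred(dev);		/* hypothetical */
		return 0;
	case TC_GRED_STATS:
		/* fill opt->stats.bstats[]/qstats[]/xstats[] with deltas;
		 * gred_offload_dump_stats() folds them back into the Qdisc.
		 */
		return my_drv_fill_gred_stats(dev, &opt->stats); /* hypothetical */
	default:
		return -EOPNOTSUPP;
	}
}
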
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f20f3a0..203659b 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -38,9 +38,8 @@ static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
 }
 
-static void mq_offload_stats(struct Qdisc *sch)
+static int mq_offload_stats(struct Qdisc *sch)
 {
-	struct net_device *dev = qdisc_dev(sch);
 	struct tc_mq_qopt_offload opt = {
 		.command = TC_MQ_STATS,
 		.handle = sch->handle,
@@ -50,8 +49,7 @@ static void mq_offload_stats(struct Qdisc *sch)
 		},
 	};
 
-	if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
-		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
+	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
 }
 
 static void mq_destroy(struct Qdisc *sch)
@@ -171,9 +169,8 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
-	mq_offload_stats(sch);
 
-	return 0;
+	return mq_offload_stats(sch);
 }
 
 static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
@@ -196,6 +193,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
 		    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+	struct tc_mq_qopt_offload graft_offload;
 	struct net_device *dev = qdisc_dev(sch);
 
 	if (dev->flags & IFF_UP)
@@ -206,6 +204,14 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
 		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 	if (dev->flags & IFF_UP)
 		dev_activate(dev);
+
+	graft_offload.handle = sch->handle;
+	graft_offload.graft_params.queue = cl - 1;
+	graft_offload.graft_params.child_handle = new ? new->handle : 0;
+	graft_offload.command = TC_MQ_GRAFT;
+
+	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
+				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
 	return 0;
 }
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f8af986..cdf6870 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -220,7 +220,6 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 
 		qdisc_tree_reduce_backlog(child, child->q.qlen,
 					  child->qstats.backlog);
-		qdisc_put(child);
 	}
 
 	for (i = oldbands; i < q->bands; i++) {
@@ -230,6 +229,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	sch_tree_unlock(sch);
+
+	for (i = q->bands; i < oldbands; i++)
+		qdisc_put(q->queues[i]);
 	return 0;
 }
 
@@ -251,7 +253,6 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt,
 
 static int prio_dump_offload(struct Qdisc *sch)
 {
-	struct net_device *dev = qdisc_dev(sch);
 	struct tc_prio_qopt_offload hw_stats = {
 		.command = TC_PRIO_STATS,
 		.handle = sch->handle,
@@ -263,21 +264,8 @@ static int prio_dump_offload(struct Qdisc *sch)
 			},
 		},
 	};
-	int err;
 
-	sch->flags &= ~TCQ_F_OFFLOADED;
-	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-		return 0;
-
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
-					    &hw_stats);
-	if (err == -EOPNOTSUPP)
-		return 0;
-
-	if (!err)
-		sch->flags |= TCQ_F_OFFLOADED;
-
-	return err;
+	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -309,43 +297,22 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt_offload graft_offload;
-	struct net_device *dev = qdisc_dev(sch);
 	unsigned long band = arg - 1;
-	bool any_qdisc_is_offloaded;
-	int err;
 
 	if (new == NULL)
 		new = &noop_qdisc;
 
 	*old = qdisc_replace(sch, new, &q->queues[band]);
 
-	if (!tc_can_offload(dev))
-		return 0;
-
 	graft_offload.handle = sch->handle;
 	graft_offload.parent = sch->parent;
 	graft_offload.graft_params.band = band;
 	graft_offload.graft_params.child_handle = new->handle;
 	graft_offload.command = TC_PRIO_GRAFT;
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
-					    &graft_offload);
-
-	/* Don't report error if the graft is part of destroy operation. */
-	if (err && new != &noop_qdisc) {
-		/* Don't report error if the parent, the old child and the new
-		 * one are not offloaded.
-		 */
-		any_qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
-		any_qdisc_is_offloaded |= new->flags & TCQ_F_OFFLOADED;
-		if (*old)
-			any_qdisc_is_offloaded |= (*old)->flags &
-						   TCQ_F_OFFLOADED;
-
-		if (any_qdisc_is_offloaded)
-			NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
-	}
-
+	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
+				   TC_SETUP_QDISC_PRIO, &graft_offload,
+				   extack);
 	return 0;
 }
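
prio_graft() above drops its open-coded offload handling in favour of
qdisc_offload_graft_helper(). Based on the logic being removed here, the helper behaves
roughly as reconstructed below; this is a readability sketch, not the exact upstream
implementation.

static void qdisc_offload_graft_helper_sketch(struct net_device *dev,
					      struct Qdisc *sch,
					      struct Qdisc *new,
					      struct Qdisc *old,
					      enum tc_setup_type type,
					      void *type_data,
					      struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report an error if the graft is part of a destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Only complain when at least one of the involved Qdiscs was
	 * actually offloaded; otherwise the failure is expected.
	 */
	any_qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= new->flags & TCQ_F_OFFLOADED;
	if (old)
		any_qdisc_is_offloaded |= old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
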
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3ce6c0a..9df9942 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -166,7 +166,9 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
 		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
 		opt.set.probability = q->parms.max_P;
+		opt.set.limit = q->limit;
 		opt.set.is_ecn = red_use_ecn(q);
+		opt.set.is_harddrop = red_use_harddrop(q);
 		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
@@ -193,10 +195,10 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 static int red_change(struct Qdisc *sch, struct nlattr *opt,
 		      struct netlink_ext_ack *extack)
 {
+	struct Qdisc *old_child = NULL, *child = NULL;
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_RED_MAX + 1];
 	struct tc_red_qopt *ctl;
-	struct Qdisc *child = NULL;
 	int err;
 	u32 max_P;
 
@@ -233,7 +235,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	if (child) {
 		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
 					  q->qdisc->qstats.backlog);
-		qdisc_put(q->qdisc);
+		old_child = q->qdisc;
 		q->qdisc = child;
 	}
 
@@ -252,7 +254,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 		red_start_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
+
 	red_offload(sch, true);
+
+	if (old_child)
+		qdisc_put(old_child);
 	return 0;
 }
 
@@ -279,9 +285,8 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
 	return red_change(sch, opt, extack);
 }
 
-static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
+static int red_dump_offload_stats(struct Qdisc *sch)
 {
-	struct net_device *dev = qdisc_dev(sch);
 	struct tc_red_qopt_offload hw_stats = {
 		.command = TC_RED_STATS,
 		.handle = sch->handle,
@@ -291,22 +296,8 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
 			.stats.qstats = &sch->qstats,
 		},
 	};
-	int err;
 
-	sch->flags &= ~TCQ_F_OFFLOADED;
-
-	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-		return 0;
-
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-					    &hw_stats);
-	if (err == -EOPNOTSUPP)
-		return 0;
-
-	if (!err)
-		sch->flags |= TCQ_F_OFFLOADED;
-
-	return err;
+	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -324,7 +315,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	};
 	int err;
 
-	err = red_dump_offload_stats(sch, &opt);
+	err = red_dump_offload_stats(sch);
 	if (err)
 		goto nla_put_failure;
 
@@ -377,6 +368,21 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
 	return 0;
 }
 
+static void red_graft_offload(struct Qdisc *sch,
+			      struct Qdisc *new, struct Qdisc *old,
+			      struct netlink_ext_ack *extack)
+{
+	struct tc_red_qopt_offload graft_offload = {
+		.handle		= sch->handle,
+		.parent		= sch->parent,
+		.child_handle	= new->handle,
+		.command	= TC_RED_GRAFT,
+	};
+
+	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
+				   TC_SETUP_QDISC_RED, &graft_offload, extack);
+}
+
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
@@ -386,6 +392,8 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		new = &noop_qdisc;
 
 	*old = qdisc_replace(sch, new, &q->qdisc);
+
+	red_graft_offload(sch, new, *old, extack);
 	return 0;
 }
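
red_dump_offload_stats() and prio_dump_offload() now share qdisc_offload_dump_helper(),
and gred_offload_dump_stats() uses it as well. Judging from the code removed in both call
sites, the helper is essentially the following (again a sketch for reference, not the
actual implementation):

static int qdisc_offload_dump_helper_sketch(struct Qdisc *sch,
					    enum tc_setup_type type,
					    void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;	/* no offload - fall back to SW stats only */

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
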
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6a28b96..685c7ef 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -135,6 +135,8 @@ static struct sctp_association *sctp_association_init(
 	 */
 	asoc->max_burst = sp->max_burst;
 
+	asoc->subscribe = sp->subscribe;
+
 	/* initialize association timers */
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 7df3704..ebf28ad 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -337,6 +337,34 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
 	return match;
 }
 
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+			  struct sctp_sock *sp2, int cnt2)
+{
+	struct sctp_bind_addr *bp2 = &sp2->ep->base.bind_addr;
+	struct sctp_bind_addr *bp = &sp->ep->base.bind_addr;
+	struct sctp_sockaddr_entry *laddr, *laddr2;
+	bool exist = false;
+	int cnt = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+		list_for_each_entry_rcu(laddr2, &bp2->address_list, list) {
+			if (sp->pf->af->cmp_addr(&laddr->a, &laddr2->a) &&
+			    laddr->valid && laddr2->valid) {
+				exist = true;
+				goto next;
+			}
+		}
+		cnt = 0;
+		break;
+next:
+		cnt++;
+	}
+	rcu_read_unlock();
+
+	return (cnt == cnt2) ? 0 : (exist ? -EEXIST : 1);
+}
+
 /* Does the address 'addr' conflict with any addresses in
  * the bp.
  */
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index ce80878..0b203b8 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -86,11 +86,10 @@ void sctp_datamsg_free(struct sctp_datamsg *msg)
 /* Final destruction of datamsg memory. */
 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 {
+	struct sctp_association *asoc = NULL;
 	struct list_head *pos, *temp;
 	struct sctp_chunk *chunk;
-	struct sctp_sock *sp;
 	struct sctp_ulpevent *ev;
-	struct sctp_association *asoc = NULL;
 	int error = 0, notify;
 
 	/* If we failed, we may need to notify. */
@@ -108,9 +107,8 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 			else
 				error = asoc->outqueue.error;
 
-			sp = sctp_sk(asoc->base.sk);
-			notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
-							    &sp->subscribe);
+			notify = sctp_ulpevent_type_enabled(asoc->subscribe,
+							    SCTP_SEND_FAILED);
 		}
 
 		/* Generate a SEND FAILED event only if enabled. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5c36a99..d7a649d 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -57,6 +57,7 @@
 #include <net/sctp/checksum.h>
 #include <net/net_namespace.h>
 #include <linux/rhashtable.h>
+#include <net/sock_reuseport.h>
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
@@ -65,8 +66,10 @@ static struct sctp_association *__sctp_rcv_lookup(struct net *net,
 				      const union sctp_addr *paddr,
 				      const union sctp_addr *laddr,
 				      struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
-						const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+					struct net *net, struct sk_buff *skb,
+					const union sctp_addr *laddr,
+					const union sctp_addr *daddr);
 static struct sctp_association *__sctp_lookup_association(
 					struct net *net,
 					const union sctp_addr *local,
@@ -171,7 +174,7 @@ int sctp_rcv(struct sk_buff *skb)
 	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 
 	if (!asoc)
-		ep = __sctp_rcv_lookup_endpoint(net, &dest);
+		ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
 
 	/* Retrieve the common input handling substructure. */
 	rcvr = asoc ? &asoc->base : &ep->base;
@@ -574,7 +577,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
  * is probably better.
  *
  */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+int sctp_v4_err(struct sk_buff *skb, __u32 info)
 {
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int ihlen = iph->ihl * 4;
@@ -599,7 +602,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	skb->transport_header = savesctp;
 	if (!sk) {
 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
+		return -ENOENT;
 	}
 	/* Warning:  The sock lock is held.  Remember to call
 	 * sctp_err_finish!
@@ -653,6 +656,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 
 out_unlock:
 	sctp_err_finish(sk, transport);
+	return 0;
 }
 
 /*
@@ -720,43 +724,87 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
 }
 
 /* Insert endpoint into the hash table.  */
-static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
+static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
-	struct net *net = sock_net(ep->base.sk);
-	struct sctp_ep_common *epb;
+	struct sock *sk = ep->base.sk;
+	struct net *net = sock_net(sk);
 	struct sctp_hashbucket *head;
+	struct sctp_ep_common *epb;
 
 	epb = &ep->base;
-
 	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 	head = &sctp_ep_hashtable[epb->hashent];
 
+	if (sk->sk_reuseport) {
+		bool any = sctp_is_ep_boundall(sk);
+		struct sctp_ep_common *epb2;
+		struct list_head *list;
+		int cnt = 0, err = 1;
+
+		list_for_each(list, &ep->base.bind_addr.address_list)
+			cnt++;
+
+		sctp_for_each_hentry(epb2, &head->chain) {
+			struct sock *sk2 = epb2->sk;
+
+			if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
+			    !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
+			    !sk2->sk_reuseport)
+				continue;
+
+			err = sctp_bind_addrs_check(sctp_sk(sk2),
+						    sctp_sk(sk), cnt);
+			if (!err) {
+				err = reuseport_add_sock(sk, sk2, any);
+				if (err)
+					return err;
+				break;
+			} else if (err < 0) {
+				return err;
+			}
+		}
+
+		if (err) {
+			err = reuseport_alloc(sk, any);
+			if (err)
+				return err;
+		}
+	}
+
 	write_lock(&head->lock);
 	hlist_add_head(&epb->node, &head->chain);
 	write_unlock(&head->lock);
+	return 0;
 }
 
 /* Add an endpoint to the hash. Local BH-safe. */
-void sctp_hash_endpoint(struct sctp_endpoint *ep)
+int sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
+	int err;
+
 	local_bh_disable();
-	__sctp_hash_endpoint(ep);
+	err = __sctp_hash_endpoint(ep);
 	local_bh_enable();
+
+	return err;
 }
 
 /* Remove endpoint from the hash table.  */
 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
-	struct net *net = sock_net(ep->base.sk);
+	struct sock *sk = ep->base.sk;
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 
 	epb = &ep->base;
 
-	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
+	epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
+	if (rcu_access_pointer(sk->sk_reuseport_cb))
+		reuseport_detach_sock(sk);
+
 	write_lock(&head->lock);
 	hlist_del_init(&epb->node);
 	write_unlock(&head->lock);
@@ -770,16 +818,35 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 	local_bh_enable();
 }
 
+static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
+				const union sctp_addr *paddr, __u32 seed)
+{
+	__u32 addr;
+
+	if (paddr->sa.sa_family == AF_INET6)
+		addr = jhash(&paddr->v6.sin6_addr, 16, seed);
+	else
+		addr = (__force __u32)paddr->v4.sin_addr.s_addr;
+
+	return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
+			     (__force __u32)lport, net_hash_mix(net), seed);
+}
+
 /* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
-						const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+					struct net *net, struct sk_buff *skb,
+					const union sctp_addr *laddr,
+					const union sctp_addr *paddr)
 {
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
+	struct sock *sk;
+	__be16 lport;
 	int hash;
 
-	hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
+	lport = laddr->v4.sin_port;
+	hash = sctp_ep_hashfn(net, ntohs(lport));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, &head->chain) {
@@ -791,6 +858,15 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
 	ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 hit:
+	sk = ep->base.sk;
+	if (sk->sk_reuseport) {
+		__u32 phash = sctp_hashfn(net, lport, paddr, 0);
+
+		sk = reuseport_select_sock(sk, phash, skb,
+					   sizeof(struct sctphdr));
+		if (sk)
+			ep = sctp_sk(sk)->ep;
+	}
 	sctp_endpoint_hold(ep);
 	read_unlock(&head->lock);
 	return ep;
@@ -829,35 +905,17 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
 static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
 {
 	const struct sctp_transport *t = data;
-	const union sctp_addr *paddr = &t->ipaddr;
-	const struct net *net = sock_net(t->asoc->base.sk);
-	__be16 lport = htons(t->asoc->base.bind_addr.port);
-	__u32 addr;
 
-	if (paddr->sa.sa_family == AF_INET6)
-		addr = jhash(&paddr->v6.sin6_addr, 16, seed);
-	else
-		addr = (__force __u32)paddr->v4.sin_addr.s_addr;
-
-	return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
-			     (__force __u32)lport, net_hash_mix(net), seed);
+	return sctp_hashfn(sock_net(t->asoc->base.sk),
+			   htons(t->asoc->base.bind_addr.port),
+			   &t->ipaddr, seed);
 }
 
 static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
 {
 	const struct sctp_hash_cmp_arg *x = data;
-	const union sctp_addr *paddr = x->paddr;
-	const struct net *net = x->net;
-	__be16 lport = x->lport;
-	__u32 addr;
 
-	if (paddr->sa.sa_family == AF_INET6)
-		addr = jhash(&paddr->v6.sin6_addr, 16, seed);
-	else
-		addr = (__force __u32)paddr->v4.sin_addr.s_addr;
-
-	return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
-			     (__force __u32)lport, net_hash_mix(net), seed);
+	return sctp_hashfn(x->net, x->lport, x->paddr, seed);
 }
 
 static const struct rhashtable_params sctp_hash_params = {
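
The endpoint hashing changes above plug SCTP into the generic reuseport infrastructure
(reuseport_alloc()/reuseport_add_sock() on hash, reuseport_select_sock() on lookup). From
userspace the usage pattern mirrors TCP/UDP: each worker sets SO_REUSEPORT before bind()
on the same port, and the kernel spreads incoming associations across the listeners,
provided all sockets bind the same address list and share the same owning uid. A minimal
sketch, with error handling trimmed and port values arbitrary:

#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

/* Run once per worker process; all workers bind the same port. */
static int open_sctp_reuseport_listener(uint16_t port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0)
		return -1;

	/* Must be set before bind(); sockets sharing the port must also
	 * bind the same address list and belong to the same uid.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(fd, 128)) {
		close(fd);
		return -1;
	}
	return fd;
}
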
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index fc6c5e4..6e27c62 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -138,7 +138,7 @@ static struct notifier_block sctp_inet6addr_notifier = {
 };
 
 /* ICMP error handler. */
-static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
 	struct inet6_dev *idev;
@@ -147,7 +147,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct sctp_transport *transport;
 	struct ipv6_pinfo *np;
 	__u16 saveip, savesctp;
-	int err;
+	int err, ret = 0;
 	struct net *net = dev_net(skb->dev);
 
 	idev = in6_dev_get(skb->dev);
@@ -163,6 +163,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	skb->transport_header = savesctp;
 	if (!sk) {
 		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
+		ret = -ENOENT;
 		goto out;
 	}
 
@@ -202,6 +203,8 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 out:
 	if (likely(idev != NULL))
 		in6_dev_put(idev);
+
+	return ret;
 }
 
 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index c0817f7a..a8c4c33 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -53,7 +53,7 @@
 int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
 			    void *arg) { \
 	int error = 0; \
-	enum sctp_event event_type; union sctp_subtype subtype; \
+	enum sctp_event_type event_type; union sctp_subtype subtype; \
 	enum sctp_state state; \
 	struct sctp_endpoint *ep; \
 	\
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 85d3930..1d143bc 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -52,7 +52,7 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/stream_sched.h>
 
-static int sctp_cmd_interpreter(enum sctp_event event_type,
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 				union sctp_subtype subtype,
 				enum sctp_state state,
 				struct sctp_endpoint *ep,
@@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 				enum sctp_disposition status,
 				struct sctp_cmd_seq *commands,
 				gfp_t gfp);
-static int sctp_side_effects(enum sctp_event event_type,
+static int sctp_side_effects(enum sctp_event_type event_type,
 			     union sctp_subtype subtype,
 			     enum sctp_state state,
 			     struct sctp_endpoint *ep,
@@ -623,7 +623,7 @@ static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
 /* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
 static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
 				  struct sctp_association *asoc,
-				  enum sctp_event event_type,
+				  enum sctp_event_type event_type,
 				  union sctp_subtype subtype,
 				  struct sctp_chunk *chunk,
 				  unsigned int error)
@@ -1162,7 +1162,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
  * If you want to understand all of lksctp, this is a
  * good place to start.
  */
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
 	       union sctp_subtype subtype, enum sctp_state state,
 	       struct sctp_endpoint *ep, struct sctp_association *asoc,
 	       void *event_arg, gfp_t gfp)
@@ -1199,7 +1199,7 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type,
 /*****************************************************************
  * This is the master state function side effect processing function.
  *****************************************************************/
-static int sctp_side_effects(enum sctp_event event_type,
+static int sctp_side_effects(enum sctp_event_type event_type,
 			     union sctp_subtype subtype,
 			     enum sctp_state state,
 			     struct sctp_endpoint *ep,
@@ -1285,7 +1285,7 @@ static int sctp_side_effects(enum sctp_event event_type,
  ********************************************************************/
 
 /* This is the side-effect interpreter.  */
-static int sctp_cmd_interpreter(enum sctp_event event_type,
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 				union sctp_subtype subtype,
 				enum sctp_state state,
 				struct sctp_endpoint *ep,
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 691d9dc..d239b94 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -79,7 +79,7 @@ static const struct sctp_sm_table_entry bug = {
 
 const struct sctp_sm_table_entry *sctp_sm_lookup_event(
 					struct net *net,
-					enum sctp_event event_type,
+					enum sctp_event_type event_type,
 					enum sctp_state state,
 					union sctp_subtype event_subtype)
 {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bf618d1..1fb2cad 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2230,7 +2230,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (sp->recvrcvinfo)
 		sctp_ulpevent_read_rcvinfo(event, msg);
 	/* Check if we allow SCTP_SNDRCVINFO. */
-	if (sp->subscribe.sctp_data_io_event)
+	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
 		sctp_ulpevent_read_sndrcvinfo(event, msg);
 
 	err = copied;
@@ -2304,22 +2304,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 				  unsigned int optlen)
 {
+	struct sctp_event_subscribe subscribe;
+	__u8 *sn_type = (__u8 *)&subscribe;
+	struct sctp_sock *sp = sctp_sk(sk);
 	struct sctp_association *asoc;
-	struct sctp_ulpevent *event;
+	int i;
 
 	if (optlen > sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
-	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
+
+	if (copy_from_user(&subscribe, optval, optlen))
 		return -EFAULT;
 
+	for (i = 0; i < optlen; i++)
+		sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
+				       sn_type[i]);
+
+	list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+		asoc->subscribe = sctp_sk(sk)->subscribe;
+
 	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
 	 * if there is no data to be sent or retransmit, the stack will
 	 * immediately send up this notification.
 	 */
-	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
-				       &sctp_sk(sk)->subscribe)) {
-		asoc = sctp_id2assoc(sk, 0);
+	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
+		struct sctp_ulpevent *event;
 
+		asoc = sctp_id2assoc(sk, 0);
 		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
 			event = sctp_ulpevent_make_sender_dry_event(asoc,
 					GFP_USER | __GFP_NOWARN);
@@ -4261,6 +4272,57 @@ static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
 	return 0;
 }
 
+static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
+				 unsigned int optlen)
+{
+	struct sctp_association *asoc;
+	struct sctp_ulpevent *event;
+	struct sctp_event param;
+	int retval = 0;
+
+	if (optlen < sizeof(param)) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	optlen = sizeof(param);
+	if (copy_from_user(&param, optval, optlen)) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (param.se_type < SCTP_SN_TYPE_BASE ||
+	    param.se_type > SCTP_SN_TYPE_MAX) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	asoc = sctp_id2assoc(sk, param.se_assoc_id);
+	if (!asoc) {
+		sctp_ulpevent_type_set(&sctp_sk(sk)->subscribe,
+				       param.se_type, param.se_on);
+		goto out;
+	}
+
+	sctp_ulpevent_type_set(&asoc->subscribe, param.se_type, param.se_on);
+
+	if (param.se_type == SCTP_SENDER_DRY_EVENT && param.se_on) {
+		if (sctp_outq_is_empty(&asoc->outqueue)) {
+			event = sctp_ulpevent_make_sender_dry_event(asoc,
+					GFP_USER | __GFP_NOWARN);
+			if (!event) {
+				retval = -ENOMEM;
+				goto out;
+			}
+
+			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+		}
+	}
+
+out:
+	return retval;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4458,6 +4520,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 	case SCTP_REUSE_PORT:
 		retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
 		break;
+	case SCTP_EVENT:
+		retval = sctp_setsockopt_event(sk, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -4706,7 +4771,7 @@ static int sctp_init_sock(struct sock *sk)
 	/* Initialize default event subscriptions. By default, all the
 	 * options are off.
 	 */
-	memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
+	sp->subscribe = 0;
 
 	/* Default Peer Address Parameters.  These defaults can
 	 * be modified via SCTP_PEER_ADDR_PARAMS
@@ -5251,14 +5316,24 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
+	struct sctp_event_subscribe subscribe;
+	__u8 *sn_type = (__u8 *)&subscribe;
+	int i;
+
 	if (len == 0)
 		return -EINVAL;
 	if (len > sizeof(struct sctp_event_subscribe))
 		len = sizeof(struct sctp_event_subscribe);
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
+
+	for (i = 0; i < len; i++)
+		sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
+							SCTP_SN_TYPE_BASE + i);
+
+	if (copy_to_user(optval, &subscribe, len))
 		return -EFAULT;
+
 	return 0;
 }
 
@@ -7393,6 +7468,37 @@ static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
 	return 0;
 }
 
+static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
+				 int __user *optlen)
+{
+	struct sctp_association *asoc;
+	struct sctp_event param;
+	__u16 subscribe;
+
+	if (len < sizeof(param))
+		return -EINVAL;
+
+	len = sizeof(param);
+	if (copy_from_user(&param, optval, len))
+		return -EFAULT;
+
+	if (param.se_type < SCTP_SN_TYPE_BASE ||
+	    param.se_type > SCTP_SN_TYPE_MAX)
+		return -EINVAL;
+
+	asoc = sctp_id2assoc(sk, param.se_assoc_id);
+	subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
+	param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+
+	if (copy_to_user(optval, &param, len))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
 			   char __user *optval, int __user *optlen)
 {
@@ -7591,6 +7697,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 	case SCTP_REUSE_PORT:
 		retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
 		break;
+	case SCTP_EVENT:
+		retval = sctp_getsockopt_event(sk, len, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -7628,8 +7737,10 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
-	bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse);
+	struct sctp_sock *sp = sctp_sk(sk);
+	bool reuse = (sk->sk_reuse || sp->reuse);
 	struct sctp_bind_hashbucket *head; /* hash list */
+	kuid_t uid = sock_i_uid(sk);
 	struct sctp_bind_bucket *pp;
 	unsigned short snum;
 	int ret;
@@ -7705,7 +7816,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 
 		pr_debug("%s: found a possible match\n", __func__);
 
-		if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING)
+		if ((pp->fastreuse && reuse &&
+		     sk->sk_state != SCTP_SS_LISTENING) ||
+		    (pp->fastreuseport && sk->sk_reuseport &&
+		     uid_eq(pp->fastuid, uid)))
 			goto success;
 
 		/* Run through the list of sockets bound to the port
@@ -7719,16 +7833,18 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 		 * in an endpoint.
 		 */
 		sk_for_each_bound(sk2, &pp->owner) {
-			struct sctp_endpoint *ep2;
-			ep2 = sctp_sk(sk2)->ep;
+			struct sctp_sock *sp2 = sctp_sk(sk2);
+			struct sctp_endpoint *ep2 = sp2->ep;
 
 			if (sk == sk2 ||
-			    (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) &&
-			     sk2->sk_state != SCTP_SS_LISTENING))
+			    (reuse && (sk2->sk_reuse || sp2->reuse) &&
+			     sk2->sk_state != SCTP_SS_LISTENING) ||
+			    (sk->sk_reuseport && sk2->sk_reuseport &&
+			     uid_eq(uid, sock_i_uid(sk2))))
 				continue;
 
-			if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
-						 sctp_sk(sk2), sctp_sk(sk))) {
+			if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
+						    addr, sp2, sp)) {
 				ret = (long)sk2;
 				goto fail_unlock;
 			}
@@ -7751,19 +7867,32 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 			pp->fastreuse = 1;
 		else
 			pp->fastreuse = 0;
-	} else if (pp->fastreuse &&
-		   (!reuse || sk->sk_state == SCTP_SS_LISTENING))
-		pp->fastreuse = 0;
+
+		if (sk->sk_reuseport) {
+			pp->fastreuseport = 1;
+			pp->fastuid = uid;
+		} else {
+			pp->fastreuseport = 0;
+		}
+	} else {
+		if (pp->fastreuse &&
+		    (!reuse || sk->sk_state == SCTP_SS_LISTENING))
+			pp->fastreuse = 0;
+
+		if (pp->fastreuseport &&
+		    (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
+			pp->fastreuseport = 0;
+	}
 
 	/* We are set, so fill up all the data in the hash table
 	 * entry, tie the socket list information with the rest of the
 	 * sockets FIXME: Blurry, NPI (ipg).
 	 */
 success:
-	if (!sctp_sk(sk)->bind_hash) {
+	if (!sp->bind_hash) {
 		inet_sk(sk)->inet_num = snum;
 		sk_add_bind_node(sk, &pp->owner);
-		sctp_sk(sk)->bind_hash = pp;
+		sp->bind_hash = pp;
 	}
 	ret = 0;
 
@@ -7836,8 +7965,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 	}
 
 	sk->sk_max_ack_backlog = backlog;
-	sctp_hash_endpoint(ep);
-	return 0;
+	return sctp_hash_endpoint(ep);
 }
 
 /*
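
sctp_setsockopt_event()/sctp_getsockopt_event() above expose the new SCTP_EVENT socket
option, which toggles a single notification type per association instead of rewriting the
whole SCTP_EVENTS bitmap. A hedged userspace sketch follows; it assumes the struct
sctp_event and SCTP_EVENT definitions from the matching uapi header update, which is not
part of this section.

#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_assoc_t, event type constants */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

/* Toggle one event type; an assoc_id that does not resolve to an
 * association (e.g. 0 on a one-to-one socket) updates the socket-level
 * default instead, per sctp_setsockopt_event() above.
 */
static int sctp_set_event(int fd, sctp_assoc_t assoc_id,
			  uint16_t type, uint8_t on)
{
	struct sctp_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.se_assoc_id = assoc_id;
	ev.se_type = type;		/* e.g. SCTP_SENDER_DRY_EVENT */
	ev.se_on = on;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
}
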
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 0a78cdf..a6bf215 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -140,7 +140,7 @@ static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
 				  struct sctp_ulpevent *event)
 {
 	struct sctp_ulpevent *cevent;
-	struct sk_buff *pos;
+	struct sk_buff *pos, *loc;
 
 	pos = skb_peek_tail(&ulpq->reasm);
 	if (!pos) {
@@ -166,23 +166,30 @@ static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
 		return;
 	}
 
+	loc = NULL;
 	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 
 		if (event->stream < cevent->stream ||
 		    (event->stream == cevent->stream &&
-		     MID_lt(event->mid, cevent->mid)))
+		     MID_lt(event->mid, cevent->mid))) {
+			loc = pos;
 			break;
-
+		}
 		if (event->stream == cevent->stream &&
 		    event->mid == cevent->mid &&
 		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
 		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
-		     event->fsn < cevent->fsn))
+		     event->fsn < cevent->fsn)) {
+			loc = pos;
 			break;
+		}
 	}
 
-	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+	if (!loc)
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+	else
+		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
 }
 
 static struct sctp_ulpevent *sctp_intl_retrieve_partial(
@@ -383,7 +390,7 @@ static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
 				    struct sctp_ulpevent *event)
 {
 	struct sctp_ulpevent *cevent;
-	struct sk_buff *pos;
+	struct sk_buff *pos, *loc;
 
 	pos = skb_peek_tail(&ulpq->lobby);
 	if (!pos) {
@@ -403,18 +410,25 @@ static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
 		return;
 	}
 
+	loc = NULL;
 	skb_queue_walk(&ulpq->lobby, pos) {
 		cevent = (struct sctp_ulpevent *)pos->cb;
 
-		if (cevent->stream > event->stream)
+		if (cevent->stream > event->stream) {
+			loc = pos;
 			break;
-
+		}
 		if (cevent->stream == event->stream &&
-		    MID_lt(event->mid, cevent->mid))
+		    MID_lt(event->mid, cevent->mid)) {
+			loc = pos;
 			break;
+		}
 	}
 
-	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+	if (!loc)
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+	else
+		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
 }
 
 static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
@@ -489,7 +503,7 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
 		sk_incoming_cpu_update(sk);
 	}
 
-	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 		goto out_free;
 
 	if (skb_list)
@@ -980,17 +994,19 @@ static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
 	struct sock *sk = ulpq->asoc->base.sk;
 	struct sctp_ulpevent *ev = NULL;
 
-	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
-					&sctp_sk(sk)->subscribe))
+	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+					SCTP_PARTIAL_DELIVERY_EVENT))
 		return;
 
 	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
 				      sid, mid, flags, gfp);
 	if (ev) {
+		struct sctp_sock *sp = sctp_sk(sk);
+
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
-		if (!sctp_sk(sk)->data_ready_signalled) {
-			sctp_sk(sk)->data_ready_signalled = 1;
+		if (!sp->data_ready_signalled) {
+			sp->data_ready_signalled = 1;
 			sk->sk_data_ready(sk);
 		}
 	}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 331cc73..5dde921 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -219,7 +219,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sk_incoming_cpu_update(sk);
 	}
 	/* Check if the user wishes to receive this event.  */
-	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 		goto out_free;
 
 	/* If we are in partial delivery mode, post to the lobby until
@@ -1129,16 +1129,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *ev = NULL;
-	struct sock *sk;
 	struct sctp_sock *sp;
+	struct sock *sk;
 
 	if (!ulpq->pd_mode)
 		return;
 
 	sk = ulpq->asoc->base.sk;
 	sp = sctp_sk(sk);
-	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
-				       &sctp_sk(sk)->subscribe))
+	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+				       SCTP_PARTIAL_DELIVERY_EVENT))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
 					      SCTP_PARTIAL_DELIVERY_ABORTED,
 					      0, 0, 0, gfp);
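
Throughout the SCTP hunks above, the per-socket struct sctp_event_subscribe is replaced by
a compact subscribe bitmask that is also copied into each association (asoc->subscribe).
The accessor helpers live in a header change outside this section; consistent with their
call sites here (one bit per SCTP_SN_TYPE_BASE-relative event type and a __u16 holder),
they presumably reduce to simple bit operations along these lines:

/* Sketch only - the real helpers ship with the header update. */
static inline bool sctp_ulpevent_type_enabled_sketch(__u16 subscribe,
						     __u16 sn_type)
{
	return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
}

static inline void sctp_ulpevent_type_set_sketch(__u16 *subscribe,
						 __u16 sn_type, __u8 on)
{
	if (on)
		*subscribe |= 1 << (sn_type - SCTP_SN_TYPE_BASE);
	else
		*subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
}
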
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5fbaf19..63f08b4 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -301,14 +301,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
 	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
 }
 
-/* register a new rmb, optionally send confirm_rkey msg to register with peer */
+/* register a new rmb, send confirm_rkey msg to register with peer */
 static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
 		       bool conf_rkey)
 {
-	/* register memory region for new rmb */
-	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
-		rmb_desc->regerr = 1;
-		return -EFAULT;
+	if (!rmb_desc->wr_reg) {
+		/* register memory region for new rmb */
+		if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
+			rmb_desc->regerr = 1;
+			return -EFAULT;
+		}
+		rmb_desc->wr_reg = 1;
 	}
 	if (!conf_rkey)
 		return 0;
@@ -337,8 +340,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
 		struct smc_clc_msg_decline dclc;
 
 		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
-				      SMC_CLC_DECLINE);
-		return rc;
+				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
 	}
 
 	if (link->llc_confirm_rc)
@@ -365,8 +368,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
 		struct smc_clc_msg_decline dclc;
 
 		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
-				      SMC_CLC_DECLINE);
-		return rc;
+				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
 	}
 
 	/* send add link reject message, only one link supported for now */
@@ -535,7 +538,8 @@ static int smc_connect_clc(struct smc_sock *smc, int smc_type,
 	if (rc)
 		return rc;
 	/* receive SMC Accept CLC message */
-	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT);
+	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
+				CLC_WAIT_TIME);
 }
 
 /* setup for RDMA connection of client */
@@ -583,8 +587,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
 			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
 						 local_contact);
 	} else {
-		if (!smc->conn.rmb_desc->reused &&
-		    smc_reg_rmb(link, smc->conn.rmb_desc, true))
+		if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
 			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
 						 local_contact);
 	}
@@ -968,8 +971,8 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
 		struct smc_clc_msg_decline dclc;
 
 		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
-				      SMC_CLC_DECLINE);
-		return rc;
+				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
 	}
 
 	if (link->llc_confirm_resp_rc)
@@ -989,8 +992,8 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
 		struct smc_clc_msg_decline dclc;
 
 		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
-				      SMC_CLC_DECLINE);
-		return rc;
+				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
 	}
 
 	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
@@ -1145,10 +1148,8 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
 	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
 
 	if (local_contact != SMC_FIRST_CONTACT) {
-		if (!new_smc->conn.rmb_desc->reused) {
-			if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
-				return SMC_CLC_DECL_ERR_REGRMB;
-		}
+		if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
+			return SMC_CLC_DECL_ERR_REGRMB;
 	}
 	smc_rmb_sync_sg_for_device(&new_smc->conn);
 
@@ -1184,7 +1185,6 @@ static int smc_listen_rdma_finish(struct smc_sock *new_smc,
 	return 0;
 
 decline:
-	mutex_unlock(&smc_create_lgr_pending);
 	smc_listen_decline(new_smc, reason_code, local_contact);
 	return reason_code;
 }
@@ -1225,7 +1225,7 @@ static void smc_listen_work(struct work_struct *work)
 	 */
 	pclc = (struct smc_clc_msg_proposal *)&buf;
 	reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
-				       SMC_CLC_PROPOSAL);
+				       SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
 	if (reason_code) {
 		smc_listen_decline(new_smc, reason_code, 0);
 		return;
@@ -1275,7 +1275,7 @@ static void smc_listen_work(struct work_struct *work)
 
 	/* receive SMC Confirm CLC message */
 	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
-				       SMC_CLC_CONFIRM);
+				       SMC_CLC_CONFIRM, CLC_WAIT_TIME);
 	if (reason_code) {
 		mutex_unlock(&smc_create_lgr_pending);
 		smc_listen_decline(new_smc, reason_code, local_contact);
@@ -1284,8 +1284,10 @@ static void smc_listen_work(struct work_struct *work)
 
 	/* finish worker */
 	if (!ism_supported) {
-		if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+		if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) {
+			mutex_unlock(&smc_create_lgr_pending);
 			return;
+		}
 	}
 	smc_conn_save_peer_info(new_smc, &cclc);
 	mutex_unlock(&smc_create_lgr_pending);
@@ -1357,7 +1359,6 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
-	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	sock_hold(sk); /* sock_hold in tcp_listen_worker */
 	if (!schedule_work(&smc->tcp_listen_work))
 		sock_put(sk);
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 89c3a8c..776e9df 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -265,7 +265,7 @@ int smc_clc_prfx_match(struct socket *clcsock,
  * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
  */
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
-		     u8 expected_type)
+		     u8 expected_type, unsigned long timeout)
 {
 	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
 	struct sock *clc_sk = smc->clcsock->sk;
@@ -285,7 +285,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	 * sizeof(struct smc_clc_msg_hdr)
 	 */
 	krflags = MSG_PEEK | MSG_WAITALL;
-	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
+	clc_sk->sk_rcvtimeo = timeout;
 	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
 			sizeof(struct smc_clc_msg_hdr));
 	len = sock_recvmsg(smc->clcsock, &msg, krflags);
@@ -297,7 +297,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	}
 	if (clc_sk->sk_err) {
 		reason_code = -clc_sk->sk_err;
-		smc->sk.sk_err = clc_sk->sk_err;
+		if (clc_sk->sk_err == EAGAIN &&
+		    expected_type == SMC_CLC_DECLINE)
+			clc_sk->sk_err = 0; /* reset for fallback usage */
+		else
+			smc->sk.sk_err = clc_sk->sk_err;
 		goto out;
 	}
 	if (!len) { /* peer has performed orderly shutdown */
@@ -306,7 +310,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 		goto out;
 	}
 	if (len < 0) {
-		smc->sk.sk_err = -len;
+		if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
+			smc->sk.sk_err = -len;
 		reason_code = len;
 		goto out;
 	}
@@ -346,7 +351,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	}
 
 out:
-	smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
+	clc_sk->sk_rcvtimeo = rcvtimeo;
 	return reason_code;
 }
 
@@ -374,10 +379,8 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
 			     sizeof(struct smc_clc_msg_decline));
 	if (len < sizeof(struct smc_clc_msg_decline))
-		smc->sk.sk_err = EPROTO;
-	if (len < 0)
-		smc->sk.sk_err = -len;
-	return sock_error(&smc->sk);
+		len = -EPROTO;
+	return len > 0 ? 0 : len;
 }
 
 /* send CLC PROPOSAL message across internal TCP socket */
@@ -536,7 +539,6 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
 	struct smc_link *link;
 	struct msghdr msg;
 	struct kvec vec;
-	int rc = 0;
 	int len;
 
 	memset(&aclc, 0, sizeof(aclc));
@@ -589,13 +591,8 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
 	vec.iov_len = ntohs(aclc.hdr.length);
 	len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
 			     ntohs(aclc.hdr.length));
-	if (len < ntohs(aclc.hdr.length)) {
-		if (len >= 0)
-			new_smc->sk.sk_err = EPROTO;
-		else
-			new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err;
-		rc = sock_error(&new_smc->sk);
-	}
+	if (len < ntohs(aclc.hdr.length))
+		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
 
-	return rc;
+	return len > 0 ? 0 : len;
 }
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 18da89b..24658e8 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -27,6 +27,7 @@
 #define SMC_TYPE_D		1		/* SMC-D only		      */
 #define SMC_TYPE_B		3		/* SMC-R and SMC-D	      */
 #define CLC_WAIT_TIME		(6 * HZ)	/* max. wait time on clcsock  */
+#define CLC_WAIT_TIME_SHORT	HZ		/* short wait time on clcsock */
 #define SMC_CLC_DECL_MEM	0x01010000  /* insufficient memory resources  */
 #define SMC_CLC_DECL_TIMEOUT_CL	0x02010000  /* timeout w4 QP confirm link     */
 #define SMC_CLC_DECL_TIMEOUT_AL	0x02020000  /* timeout w4 QP add link	      */
@@ -182,7 +183,7 @@ struct smcd_dev;
 int smc_clc_prfx_match(struct socket *clcsock,
 		       struct smc_clc_msg_proposal_prefix *prop);
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
-		     u8 expected_type);
+		     u8 expected_type, unsigned long timeout);
 int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
 int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
 			  struct smc_ib_device *smcibdev, u8 ibport, u8 gid[],
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 1c9fa7f..35c1cdc 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -149,6 +149,8 @@ static int smc_link_send_delete(struct smc_link *lnk)
 	return -ENOTCONN;
 }
 
+static void smc_lgr_free(struct smc_link_group *lgr);
+
 static void smc_lgr_free_work(struct work_struct *work)
 {
 	struct smc_link_group *lgr = container_of(to_delayed_work(work),
@@ -171,8 +173,11 @@ static void smc_lgr_free_work(struct work_struct *work)
 	spin_unlock_bh(&smc_lgr_list.lock);
 
 	if (!lgr->is_smcd && !lgr->terminating)	{
+		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
 		/* try to send del link msg, on error free lgr immediately */
-		if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
+		if (lnk->state == SMC_LNK_ACTIVE &&
+		    !smc_link_send_delete(lnk)) {
 			/* reschedule in case we never receive a response */
 			smc_lgr_schedule_free_work(lgr);
 			return;
@@ -295,8 +300,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
 		conn->sndbuf_desc->used = 0;
 	if (conn->rmb_desc) {
 		if (!conn->rmb_desc->regerr) {
-			conn->rmb_desc->reused = 1;
 			conn->rmb_desc->used = 0;
+			if (!lgr->is_smcd) {
+				/* unregister rmb with peer */
+				smc_llc_do_delete_rkey(
+						&lgr->lnk[SMC_SINGLE_LINK],
+						conn->rmb_desc);
+			}
 		} else {
 			/* buf registration failed, reuse not possible */
 			write_lock_bh(&lgr->rmbs_lock);
@@ -410,7 +420,7 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
 }
 
 /* remove a link group */
-void smc_lgr_free(struct smc_link_group *lgr)
+static void smc_lgr_free(struct smc_link_group *lgr)
 {
 	smc_lgr_free_bufs(lgr);
 	if (lgr->is_smcd)
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index cf98f4d..b002879 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -109,6 +109,9 @@ struct smc_link {
 	int			llc_testlink_time; /* testlink interval */
 	struct completion	llc_confirm_rkey; /* wait 4 rx of cnf rkey */
 	int			llc_confirm_rkey_rc; /* rc from cnf rkey msg */
+	struct completion	llc_delete_rkey; /* wait 4 rx of del rkey */
+	int			llc_delete_rkey_rc; /* rc from del rkey msg */
+	struct mutex		llc_delete_rkey_mutex; /* serialize usage */
 };
 
 /* For now we just allow one parallel link per link group. The SMC protocol
@@ -127,7 +130,7 @@ struct smc_buf_desc {
 	struct page		*pages;
 	int			len;		/* length of buffer */
 	u32			used;		/* currently used / unused */
-	u8			reused	: 1;	/* new created / reused */
+	u8			wr_reg	: 1;	/* mem region registered */
 	u8			regerr	: 1;	/* err during registration */
 	union {
 		struct { /* SMC-R */
@@ -243,7 +246,6 @@ struct smc_sock;
 struct smc_clc_msg_accept_confirm;
 struct smc_clc_msg_local;
 
-void smc_lgr_free(struct smc_link_group *lgr);
 void smc_lgr_forget(struct smc_link_group *lgr);
 void smc_lgr_terminate(struct smc_link_group *lgr);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 9c916c7..a6d3623 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -238,6 +238,29 @@ static int smc_llc_send_confirm_rkey(struct smc_link *link,
 	return rc;
 }
 
+/* send LLC delete rkey request */
+static int smc_llc_send_delete_rkey(struct smc_link *link,
+				    struct smc_buf_desc *rmb_desc)
+{
+	struct smc_llc_msg_delete_rkey *rkeyllc;
+	struct smc_wr_tx_pend_priv *pend;
+	struct smc_wr_buf *wr_buf;
+	int rc;
+
+	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+	if (rc)
+		return rc;
+	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
+	memset(rkeyllc, 0, sizeof(*rkeyllc));
+	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
+	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
+	rkeyllc->num_rkeys = 1;
+	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+	/* send llc message */
+	rc = smc_wr_tx_send(link, pend);
+	return rc;
+}
+
 /* prepare an add link message */
 static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
 				  struct smc_link *link, u8 mac[], u8 gid[],
@@ -509,7 +532,9 @@ static void smc_llc_rx_delete_rkey(struct smc_link *link,
 	int i, max;
 
 	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
-		/* unused as long as we don't send this type of msg */
+		link->llc_delete_rkey_rc = llc->hd.flags &
+					    SMC_LLC_FLAG_RKEY_NEG;
+		complete(&link->llc_delete_rkey);
 	} else {
 		max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
 		for (i = 0; i < max; i++) {
@@ -610,6 +635,8 @@ int smc_llc_link_init(struct smc_link *link)
 	init_completion(&link->llc_add);
 	init_completion(&link->llc_add_resp);
 	init_completion(&link->llc_confirm_rkey);
+	init_completion(&link->llc_delete_rkey);
+	mutex_init(&link->llc_delete_rkey_mutex);
 	init_completion(&link->llc_testlink_resp);
 	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
 	return 0;
@@ -650,8 +677,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
 {
 	int rc;
 
+	/* protected by mutex smc_create_lgr_pending */
 	reinit_completion(&link->llc_confirm_rkey);
-	smc_llc_send_confirm_rkey(link, rmb_desc);
+	rc = smc_llc_send_confirm_rkey(link, rmb_desc);
+	if (rc)
+		return rc;
 	/* receive CONFIRM RKEY response from server over RoCE fabric */
 	rc = wait_for_completion_interruptible_timeout(&link->llc_confirm_rkey,
 						       SMC_LLC_WAIT_TIME);
@@ -660,6 +690,29 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
 	return 0;
 }
 
+/* unregister an rtoken at the remote peer */
+int smc_llc_do_delete_rkey(struct smc_link *link,
+			   struct smc_buf_desc *rmb_desc)
+{
+	int rc;
+
+	mutex_lock(&link->llc_delete_rkey_mutex);
+	reinit_completion(&link->llc_delete_rkey);
+	rc = smc_llc_send_delete_rkey(link, rmb_desc);
+	if (rc)
+		goto out;
+	/* receive DELETE RKEY response from server over RoCE fabric */
+	rc = wait_for_completion_interruptible_timeout(&link->llc_delete_rkey,
+						       SMC_LLC_WAIT_TIME);
+	if (rc <= 0 || link->llc_delete_rkey_rc)
+		rc = -EFAULT;
+	else
+		rc = 0;
+out:
+	mutex_unlock(&link->llc_delete_rkey_mutex);
+	return rc;
+}
+
 /***************************** init, exit, misc ******************************/
 
 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index 9e2ff08..461c0c3 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -49,6 +49,8 @@ void smc_llc_link_inactive(struct smc_link *link);
 void smc_llc_link_clear(struct smc_link *link);
 int smc_llc_do_confirm_rkey(struct smc_link *link,
 			    struct smc_buf_desc *rmb_desc);
+int smc_llc_do_delete_rkey(struct smc_link *link,
+			   struct smc_buf_desc *rmb_desc);
 int smc_llc_init(void) __init;
 
 #endif /* SMC_LLC_H */
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 9062967..7e55cfc 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -175,7 +175,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		return -1;
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 	    !skb->csum_complete_sw)
-		netdev_rx_csum_fault(skb->dev);
+		netdev_rx_csum_fault(skb->dev, skb);
 	return 0;
 no_checksum:
 	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 74b9d91..fe23fac 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -353,30 +353,29 @@ static size_t switchdev_obj_size(const struct switchdev_obj *obj)
 	return 0;
 }
 
-static int __switchdev_port_obj_add(struct net_device *dev,
-				    const struct switchdev_obj *obj,
-				    struct switchdev_trans *trans)
+static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
+				     struct net_device *dev,
+				     const struct switchdev_obj *obj,
+				     struct switchdev_trans *trans)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
+	int rc;
+	int err;
 
-	if (ops && ops->switchdev_port_obj_add)
-		return ops->switchdev_port_obj_add(dev, obj, trans);
+	struct switchdev_notifier_port_obj_info obj_info = {
+		.obj = obj,
+		.trans = trans,
+		.handled = false,
+	};
 
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to add object on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = __switchdev_port_obj_add(lower_dev, obj, trans);
-		if (err)
-			break;
+	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info);
+	err = notifier_to_errno(rc);
+	if (err) {
+		WARN_ON(!obj_info.handled);
+		return err;
 	}
-
-	return err;
+	if (!obj_info.handled)
+		return -EOPNOTSUPP;
+	return 0;
 }
 
 static int switchdev_port_obj_add_now(struct net_device *dev,
@@ -397,7 +396,8 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
 	 */
 
 	trans.ph_prepare = true;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans);
 	if (err) {
 		/* Prepare phase failed: abort the transaction.  Any
 		 * resources reserved in the prepare phase are
@@ -416,7 +416,8 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
 	 */
 
 	trans.ph_prepare = false;
-	err = __switchdev_port_obj_add(dev, obj, &trans);
+	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+					dev, obj, &trans);
 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
 	switchdev_trans_items_warn_destroy(dev, &trans);
 
@@ -471,26 +472,8 @@ EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
 static int switchdev_port_obj_del_now(struct net_device *dev,
 				      const struct switchdev_obj *obj)
 {
-	const struct switchdev_ops *ops = dev->switchdev_ops;
-	struct net_device *lower_dev;
-	struct list_head *iter;
-	int err = -EOPNOTSUPP;
-
-	if (ops && ops->switchdev_port_obj_del)
-		return ops->switchdev_port_obj_del(dev, obj);
-
-	/* Switch device port(s) may be stacked under
-	 * bond/team/vlan dev, so recurse down to delete object on
-	 * each port.
-	 */
-
-	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_del_now(lower_dev, obj);
-		if (err)
-			break;
-	}
-
-	return err;
+	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
+					 dev, obj, NULL);
 }
 
 static void switchdev_port_obj_del_deferred(struct net_device *dev,
@@ -535,6 +518,7 @@ int switchdev_port_obj_del(struct net_device *dev,
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 
 /**
  *	register_switchdev_notifier - Register notifier
@@ -576,6 +560,31 @@ int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
+int register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+	return blocking_notifier_chain_register(chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
+
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+	return blocking_notifier_chain_unregister(chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
+
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+				      struct switchdev_notifier_info *info)
+{
+	info->dev = dev;
+	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
+					    val, info);
+}
+EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
+
 bool switchdev_port_same_parent_id(struct net_device *a,
 				   struct net_device *b)
 {
@@ -595,3 +604,103 @@ bool switchdev_port_same_parent_id(struct net_device *a,
 	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
+
+static int __switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		/* This flag is only checked if the return value is success. */
+		port_obj_info->handled = true;
+		return add_cb(dev, port_obj_info->obj, port_obj_info->trans);
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices; another driver might be able to handle them. But
+	 * propagate any hard errors to the callers.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
+						      check_cb, add_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_add(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*add_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj,
+				      struct switchdev_trans *trans))
+{
+	int err;
+
+	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+					      add_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+
+static int __switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+	int err = -EOPNOTSUPP;
+
+	if (check_cb(dev)) {
+		/* This flag is only checked if the return value is success. */
+		port_obj_info->handled = true;
+		return del_cb(dev, port_obj_info->obj);
+	}
+
+	/* Switch ports might be stacked under e.g. a LAG. Ignore the
+	 * unsupported devices; another driver might be able to handle them. But
+	 * propagate any hard errors to the callers.
+	 *
+	 * If the driver does its own bookkeeping of stacked ports, it's not
+	 * necessary to go through this helper.
+	 */
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
+						      check_cb, del_cb);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+	}
+
+	return err;
+}
+
+int switchdev_handle_port_obj_del(struct net_device *dev,
+			struct switchdev_notifier_port_obj_info *port_obj_info,
+			bool (*check_cb)(const struct net_device *dev),
+			int (*del_cb)(struct net_device *dev,
+				      const struct switchdev_obj *obj))
+{
+	int err;
+
+	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+					      del_cb);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
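
Object add/del now reaches drivers through the switchdev blocking notifier chain rather than switchdev_ops, and the switchdev_handle_port_obj_add()/_del() helpers above take care of recursing through stacked (e.g. LAG) devices. A hedged sketch of the driver-side wiring this expects; the foo_* callback names are hypothetical placeholders, while the switchdev and notifier calls are the ones added or already exported here:

#include <linux/notifier.h>
#include <net/switchdev.h>

/* Hypothetical per-driver callbacks */
static bool foo_port_dev_check(const struct net_device *dev);
static int foo_port_obj_add(struct net_device *dev,
			    const struct switchdev_obj *obj,
			    struct switchdev_trans *trans);
static int foo_port_obj_del(struct net_device *dev,
			    const struct switchdev_obj *obj);

static int foo_switchdev_blocking_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	/* ptr is the switchdev_notifier_info embedded at the start of the
	 * switchdev_notifier_port_obj_info built by switchdev_port_obj_notify()
	 */
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    foo_port_dev_check,
						    foo_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    foo_port_dev_check,
						    foo_port_obj_del);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block foo_switchdev_blocking_nb = {
	.notifier_call = foo_switchdev_blocking_event,
};

static int foo_switchdev_init(void)
{
	return register_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
}

static void foo_switchdev_fini(void)
{
	unregister_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
}

The atomic chain stays in place for events that may fire from atomic context; object add/del goes through the new blocking chain because drivers may need to sleep while programming hardware.
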
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 836727e..9e265eb8 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -105,7 +105,7 @@ struct tipc_stats {
  * @transmitq: queue for sent, non-acked messages
  * @backlogq: queue for messages waiting to be sent
  * @snt_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
+ * @prev_from: sequence number of previous retransmission request
  * @stale_cnt: counter for number of identical retransmit attempts
  * @stale_limit: time when repeated identical retransmits must force link reset
  * @ackers: # of peers that needs to ack each packet before it can be released
@@ -163,7 +163,7 @@ struct tipc_link {
 		u16 limit;
 	} backlog[5];
 	u16 snd_nxt;
-	u16 last_retransm;
+	u16 prev_from;
 	u16 window;
 	u16 stale_cnt;
 	unsigned long stale_limit;
@@ -186,9 +186,6 @@ struct tipc_link {
 	u16 acked;
 	struct tipc_link *bc_rcvlink;
 	struct tipc_link *bc_sndlink;
-	unsigned long prev_retr;
-	u16 prev_from;
-	u16 prev_to;
 	u8 nack_state;
 	bool bc_peer_is_up;
 
@@ -210,7 +207,7 @@ enum {
 	BC_NACK_SND_SUPPRESS,
 };
 
-#define TIPC_BC_RETR_LIMIT 10   /* [ms] */
+#define TIPC_BC_RETR_LIM msecs_to_jiffies(10)   /* [ms] */
 
 /*
  * Interval between NACKs when packets arrive out of order
@@ -1036,10 +1033,12 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
 
 	if (!skb)
 		return 0;
+	if (less(to, from))
+		return 0;
 
 	/* Detect repeated retransmit failures on same packet */
-	if (r->last_retransm != buf_seqno(skb)) {
-		r->last_retransm = buf_seqno(skb);
+	if (r->prev_from != from) {
+		r->prev_from = from;
 		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
 		r->stale_cnt = 0;
 	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
@@ -1055,6 +1054,11 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
 			continue;
 		if (more(msg_seqno(hdr), to))
 			break;
+		if (link_is_bc_sndlink(l)) {
+			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+				continue;
+			TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
+		}
 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
 		if (!_skb)
 			return 0;
@@ -1737,42 +1741,6 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
 		l->rcv_nxt = peers_snd_nxt;
 }
 
-/* link_bc_retr eval()- check if the indicated range can be retransmitted now
- * - Adjust permitted range if there is overlap with previous retransmission
- */
-static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
-{
-	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
-
-	if (less(*to, *from))
-		return false;
-
-	/* New retransmission request */
-	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
-	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
-		l->prev_from = *from;
-		l->prev_to = *to;
-		l->prev_retr = jiffies;
-		return true;
-	}
-
-	/* Inside range of previous retransmit */
-	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
-		return false;
-
-	/* Fully or partially outside previous range => exclude overlap */
-	if (less(*from, l->prev_from)) {
-		*to = l->prev_from - 1;
-		l->prev_from = *from;
-	}
-	if (more(*to, l->prev_to)) {
-		*from = l->prev_to + 1;
-		l->prev_to = *to;
-	}
-	l->prev_retr = jiffies;
-	return true;
-}
-
 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
  */
 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
@@ -1803,8 +1771,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
 		return rc;
 
-	if (link_bc_retr_eval(snd_l, &from, &to))
-		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
+	rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
 
 	l->snd_nxt = peers_snd_nxt;
 	if (link_bc_rcv_gap(l))
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a2879e6..a092495 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -105,6 +105,7 @@ struct tipc_skb_cb {
 	u32 bytes_read;
 	u32 orig_member;
 	struct sk_buff *tail;
+	unsigned long nxt_retr;
 	bool validated;
 	u16 chain_imp;
 	u16 ackers;
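
The new nxt_retr timestamp in tipc_skb_cb is what allows tipc_link_retrans() above to rate-limit broadcast retransmissions per buffer, replacing the per-link bookkeeping that link_bc_retr_eval() used to do. A condensed sketch of that gating pattern, assuming the TIPC definitions from the hunks above; the helper name is hypothetical:

/* Allow at most one broadcast retransmission of a given skb every
 * TIPC_BC_RETR_LIM jiffies (10 ms); otherwise stamp the next time a
 * retransmission of this buffer is permitted.
 */
static bool tipc_bc_retr_allowed(struct sk_buff *skb)
{
	if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
		return false;	/* retransmitted too recently, skip it */
	TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
	return true;
}
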
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e39dfb4..4a54236 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -37,7 +37,8 @@
 	test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
 	get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
 	test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
-	test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o
+	test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
+	xdp_dummy.o
 
 # Order correspond to 'make run_tests' order
 TEST_PROGS := test_kmod.sh \
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/xdp_dummy.c
new file mode 100644
index 0000000..43b0ef1
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_dummy.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+SEC("xdp_dummy")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 3b75180..00ae99f 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -8,7 +8,7 @@
 lib_dir=$(dirname $0)/../../../../net/forwarding
 
 ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
-	multiple_masks_test ctcam_edge_cases_test"
+	multiple_masks_test ctcam_edge_cases_test delta_simple_test"
 NUM_NETIFS=2
 source $lib_dir/tc_common.sh
 source $lib_dir/lib.sh
@@ -142,7 +142,7 @@
 	tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
 		$tcflags dst_ip 192.0.2.2 action drop
 	tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
-		$tcflags dst_ip 192.0.0.0/16 action drop
+		$tcflags dst_ip 192.0.0.0/8 action drop
 
 	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
 		-t ip -q
@@ -235,7 +235,7 @@
 		$tcflags dst_ip 192.0.2.2 action drop
 	# Filter goes into A-TCAM
 	tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
-		$tcflags dst_ip 192.0.2.0/24 action drop
+		$tcflags dst_ip 192.0.0.0/16 action drop
 
 	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
 		-t ip -q
@@ -324,6 +324,86 @@
 	ctcam_no_atcam_masks_test
 }
 
+tp_record()
+{
+	local tracepoint=$1
+	local cmd=$2
+
+	perf record -q -e $tracepoint $cmd
+	return $?
+}
+
+tp_check_hits()
+{
+	local tracepoint=$1
+	local count=$2
+
+	perf_output=`perf script -F trace:event,trace`
+	hits=`echo $perf_output | grep "$tracepoint:" | wc -l`
+	if [[ "$count" -ne "$hits" ]]; then
+		return 1
+	fi
+	return 0
+}
+
+delta_simple_test()
+{
+	# The first filter will create an eRP, and the second filter will fit
+	# into the first eRP with a delta. Then remove the first rule and check
+	# that the eRP stays (referenced by the second filter).
+
+	RET=0
+
+	if [[ "$tcflags" != "skip_sw" ]]; then
+		return 0;
+	fi
+
+	tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
+		   pref 1 handle 101 flower $tcflags dst_ip 192.0.0.0/24 \
+		   action drop"
+	tp_check_hits "objagg:objagg_obj_root_create" 1
+	check_err $? "eRP was not created"
+
+	tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
+		   pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+		   action drop"
+	tp_check_hits "objagg:objagg_obj_root_create" 0
+	check_err $? "eRP was incorrectly created"
+	tp_check_hits "objagg:objagg_obj_parent_assign" 1
+	check_err $? "delta was not created"
+
+	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+		-t ip -q
+
+	tc_check_packets "dev $h2 ingress" 101 1
+	check_fail $? "Matched a wrong filter"
+
+	tc_check_packets "dev $h2 ingress" 102 1
+	check_err $? "Did not match on correct filter"
+
+	tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
+		   pref 1 handle 101 flower"
+	tp_check_hits "objagg:objagg_obj_root_destroy" 0
+	check_err $? "eRP was incorrectly destroyed"
+	tp_check_hits "objagg:objagg_obj_parent_unassign" 0
+	check_err $? "delta was incorrectly destroyed"
+
+	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+		-t ip -q
+
+	tc_check_packets "dev $h2 ingress" 102 2
+	check_err $? "Did not match on correct filter after the first was removed"
+
+	tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
+		   pref 2 handle 102 flower"
+	tp_check_hits "objagg:objagg_obj_parent_unassign" 1
+	check_err $? "delta was not destroyed"
+	tp_check_hits "objagg:objagg_obj_root_destroy" 1
+	check_err $? "eRP was not destroyed"
+
+	log_test "delta simple test ($tcflags)"
+}
+
 setup_prepare()
 {
 	h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
new file mode 100755
index 0000000..ffa79b7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -0,0 +1,664 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test various aspects of VxLAN offloading which are specific to mlxsw, such
+# as sanitization of invalid configurations and offload indication.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="sanitization_test offload_indication_test"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+	swp1=${NETIFS[p1]}
+	swp2=${NETIFS[p2]}
+
+	ip link set dev $swp1 up
+	ip link set dev $swp2 up
+}
+
+cleanup()
+{
+	pre_cleanup
+
+	ip link set dev $swp2 down
+	ip link set dev $swp1 down
+}
+
+sanitization_single_dev_test_pass()
+{
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev vxlan0 master br0
+	check_err $?
+
+	ip link set dev $swp1 nomaster
+
+	ip link set dev $swp1 master br0
+	check_err $?
+}
+
+sanitization_single_dev_test_fail()
+{
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev vxlan0 master br0 &> /dev/null
+	check_fail $?
+
+	ip link set dev $swp1 nomaster
+
+	ip link set dev vxlan0 master br0
+	check_err $?
+	ip link set dev $swp1 master br0 &> /dev/null
+	check_fail $?
+}
+
+sanitization_single_dev_valid_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_pass
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device - valid configuration"
+}
+
+sanitization_single_dev_vlan_aware_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0 vlan_filtering 1
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with a vlan-aware bridge"
+}
+
+sanitization_single_dev_mcast_enabled_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with a multicast enabled bridge"
+}
+
+sanitization_single_dev_mcast_group_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
+		dev $swp2 group 239.0.0.1
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with a multicast group"
+}
+
+sanitization_single_dev_no_local_ip_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with no local ip"
+}
+
+sanitization_single_dev_local_ipv6_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 2001:db8::1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with local ipv6 address"
+}
+
+sanitization_single_dev_learning_enabled_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 learning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_pass
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with learning enabled"
+}
+
+sanitization_single_dev_local_interface_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with local interface"
+}
+
+sanitization_single_dev_port_range_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
+		srcport 4000 5000
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with udp source port range"
+}
+
+sanitization_single_dev_tos_static_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos 20 local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with static tos"
+}
+
+sanitization_single_dev_ttl_inherit_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl inherit tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with inherit ttl"
+}
+
+sanitization_single_dev_udp_checksum_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning udpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_single_dev_test_fail
+
+	ip link del dev vxlan0
+	ip link del dev br0
+
+	log_test "vxlan device with udp checksum"
+}
+
+sanitization_single_dev_test()
+{
+	# These tests make sure that we correctly sanitize VxLAN device
+	# configurations we do not support
+	sanitization_single_dev_valid_test
+	sanitization_single_dev_vlan_aware_test
+	sanitization_single_dev_mcast_enabled_test
+	sanitization_single_dev_mcast_group_test
+	sanitization_single_dev_no_local_ip_test
+	sanitization_single_dev_local_ipv6_test
+	sanitization_single_dev_learning_enabled_test
+	sanitization_single_dev_local_interface_test
+	sanitization_single_dev_port_range_test
+	sanitization_single_dev_tos_static_test
+	sanitization_single_dev_ttl_inherit_test
+	sanitization_single_dev_udp_checksum_test
+}
+
+sanitization_multi_devs_test_pass()
+{
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev vxlan0 master br0
+	check_err $?
+	ip link set dev $swp2 master br1
+	check_err $?
+	ip link set dev vxlan1 master br1
+	check_err $?
+
+	ip link set dev $swp2 nomaster
+	ip link set dev $swp1 nomaster
+
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev $swp2 master br1
+	check_err $?
+}
+
+sanitization_multi_devs_test_fail()
+{
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev vxlan0 master br0
+	check_err $?
+	ip link set dev $swp2 master br1
+	check_err $?
+	ip link set dev vxlan1 master br1 &> /dev/null
+	check_fail $?
+
+	ip link set dev $swp2 nomaster
+	ip link set dev $swp1 nomaster
+
+	ip link set dev vxlan1 master br1
+	check_err $?
+	ip link set dev $swp1 master br0
+	check_err $?
+	ip link set dev $swp2 master br1 &> /dev/null
+	check_fail $?
+}
+
+sanitization_multi_devs_valid_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+	ip link add dev br1 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_multi_devs_test_pass
+
+	ip link del dev vxlan1
+	ip link del dev vxlan0
+	ip link del dev br1
+	ip link del dev br0
+
+	log_test "multiple vxlan devices - valid configuration"
+}
+
+sanitization_multi_devs_ttl_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+	ip link add dev br1 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+		ttl 40 tos inherit local 198.51.100.1 dstport 4789
+
+	sanitization_multi_devs_test_fail
+
+	ip link del dev vxlan1
+	ip link del dev vxlan0
+	ip link del dev br1
+	ip link del dev br0
+
+	log_test "multiple vxlan devices with different ttl"
+}
+
+sanitization_multi_devs_udp_dstport_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+	ip link add dev br1 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 5789
+
+	sanitization_multi_devs_test_fail
+
+	ip link del dev vxlan1
+	ip link del dev vxlan0
+	ip link del dev br1
+	ip link del dev br0
+
+	log_test "multiple vxlan devices with different udp destination port"
+}
+
+sanitization_multi_devs_local_ip_test()
+{
+	RET=0
+
+	ip link add dev br0 type bridge mcast_snooping 0
+	ip link add dev br1 type bridge mcast_snooping 0
+
+	ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.2 dstport 4789
+
+	sanitization_multi_devs_test_fail
+
+	ip link del dev vxlan1
+	ip link del dev vxlan0
+	ip link del dev br1
+	ip link del dev br0
+
+	log_test "multiple vxlan devices with different local ip"
+}
+
+sanitization_multi_devs_test()
+{
+	# The device has a single VTEP, which means all the VxLAN devices
+	# we offload must share certain properties such as source IP and
+	# UDP destination port. These tests make sure that we forbid
+	# configurations that violate this limitation
+	sanitization_multi_devs_valid_test
+	sanitization_multi_devs_ttl_test
+	sanitization_multi_devs_udp_dstport_test
+	sanitization_multi_devs_local_ip_test
+}
+
+sanitization_test()
+{
+	sanitization_single_dev_test
+	sanitization_multi_devs_test
+}
+
+offload_indication_setup_create()
+{
+	# Create a simple setup with two bridges, each with a VxLAN device
+	# and one local port
+	ip link add name br0 up type bridge mcast_snooping 0
+	ip link add name br1 up type bridge mcast_snooping 0
+
+	ip link set dev $swp1 master br0
+	ip link set dev $swp2 master br1
+
+	ip address add 198.51.100.1/32 dev lo
+
+	ip link add name vxlan0 up master br0 type vxlan id 10 nolearning \
+		noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up master br1 type vxlan id 20 nolearning \
+		noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+}
+
+offload_indication_setup_destroy()
+{
+	ip link del dev vxlan1
+	ip link del dev vxlan0
+
+	ip address del 198.51.100.1/32 dev lo
+
+	ip link set dev $swp2 nomaster
+	ip link set dev $swp1 nomaster
+
+	ip link del dev br1
+	ip link del dev br0
+}
+
+offload_indication_fdb_flood_test()
+{
+	RET=0
+
+	bridge fdb append 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.2
+
+	bridge fdb show brport vxlan0 | grep 00:00:00:00:00:00 \
+		| grep -q offload
+	check_err $?
+
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self
+
+	log_test "vxlan flood entry offload indication"
+}
+
+offload_indication_fdb_bridge_test()
+{
+	RET=0
+
+	bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self master static \
+		dst 198.51.100.2
+
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+		| grep -q offload
+	check_err $?
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+		| grep -q offload
+	check_err $?
+
+	log_test "vxlan entry offload indication - initial state"
+
+	# Remove FDB entry from the bridge driver and check that corresponding
+	# entry in the VxLAN driver is not marked as offloaded
+	RET=0
+
+	bridge fdb del de:ad:be:ef:13:37 dev vxlan0 master
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+		| grep -q offload
+	check_fail $?
+
+	log_test "vxlan entry offload indication - after removal from bridge"
+
+	# Add the FDB entry back to the bridge driver and make sure it is
+	# marked as offloaded in both drivers
+	RET=0
+
+	bridge fdb add de:ad:be:ef:13:37 dev vxlan0 master static
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+		| grep -q offload
+	check_err $?
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+		| grep -q offload
+	check_err $?
+
+	log_test "vxlan entry offload indication - after re-add to bridge"
+
+	# Remove FDB entry from the VxLAN driver and check that corresponding
+	# entry in the bridge driver is not marked as offloaded
+	RET=0
+
+	bridge fdb del de:ad:be:ef:13:37 dev vxlan0 self
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+		| grep -q offload
+	check_fail $?
+
+	log_test "vxlan entry offload indication - after removal from vxlan"
+
+	# Add the FDB entry back to the VxLAN driver and make sure it is
+	# marked as offloaded in both drivers
+	RET=0
+
+	bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self dst 198.51.100.2
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+		| grep -q offload
+	check_err $?
+	bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+		| grep -q offload
+	check_err $?
+
+	log_test "vxlan entry offload indication - after re-add to vxlan"
+
+	bridge fdb del de:ad:be:ef:13:37 dev vxlan0 self master
+}
+
+offload_indication_fdb_test()
+{
+	offload_indication_fdb_flood_test
+	offload_indication_fdb_bridge_test
+}
+
+offload_indication_decap_route_test()
+{
+	RET=0
+
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link set dev vxlan0 down
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link set dev vxlan1 down
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_fail $?
+
+	log_test "vxlan decap route - vxlan device down"
+
+	RET=0
+
+	ip link set dev vxlan1 up
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link set dev vxlan0 up
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	log_test "vxlan decap route - vxlan device up"
+
+	RET=0
+
+	ip address delete 198.51.100.1/32 dev lo
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_fail $?
+
+	ip address add 198.51.100.1/32 dev lo
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	log_test "vxlan decap route - add local route"
+
+	RET=0
+
+	ip link set dev $swp1 nomaster
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link set dev $swp2 nomaster
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_fail $?
+
+	ip link set dev $swp1 master br0
+	ip link set dev $swp2 master br1
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	log_test "vxlan decap route - local ports enslavement"
+
+	RET=0
+
+	ip link del dev br0
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link del dev br1
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_fail $?
+
+	log_test "vxlan decap route - bridge device deletion"
+
+	RET=0
+
+	ip link add name br0 up type bridge mcast_snooping 0
+	ip link add name br1 up type bridge mcast_snooping 0
+	ip link set dev $swp1 master br0
+	ip link set dev $swp2 master br1
+	ip link set dev vxlan0 master br0
+	ip link set dev vxlan1 master br1
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link del dev vxlan0
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_err $?
+
+	ip link del dev vxlan1
+	ip route show table local | grep 198.51.100.1 | grep -q offload
+	check_fail $?
+
+	log_test "vxlan decap route - vxlan device deletion"
+
+	ip link add name vxlan0 up master br0 type vxlan id 10 nolearning \
+		noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+	ip link add name vxlan1 up master br1 type vxlan id 20 nolearning \
+		noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+}
+
+offload_indication_test()
+{
+	offload_indication_setup_create
+	offload_indication_fdb_test
+	offload_indication_decap_route_test
+	offload_indication_setup_destroy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
new file mode 100755
index 0000000..fedcb7b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test VxLAN flooding. The device stores flood records in a singly linked list
+# where each record stores up to three IPv4 addresses of remote VTEPs. The test
+# verifies that packets are correctly flooded in various cases such as deletion
+# of a record in the middle of the list.
+#
+# +--------------------+
+# | H1 (vrf)           |
+# |    + $h1           |
+# |    | 203.0.113.1/24|
+# +----|---------------+
+#      |
+# +----|----------------------------------------------------------------------+
+# | SW |                                                                      |
+# | +--|--------------------------------------------------------------------+ |
+# | |  + $swp1                   BR0 (802.1d)                               | |
+# | |                                                                       | |
+# | |  + vxlan0 (vxlan)                                                     | |
+# | |    local 198.51.100.1                                                 | |
+# | |    remote 198.51.100.{2..13}                                          | |
+# | |    id 10 dstport 4789                                                 | |
+# | +-----------------------------------------------------------------------+ |
+# |                                                                           |
+# |  198.51.100.0/24 via 192.0.2.2                                            |
+# |                                                                           |
+# |    + $rp1                                                                 |
+# |    | 192.0.2.1/24                                                         |
+# +----|----------------------------------------------------------------------+
+#      |
+# +----|--------------------------------------------------------+
+# |    |                                               R2 (vrf) |
+# |    + $rp2                                                   |
+# |      192.0.2.2/24                                           |
+# |                                                             |
+# +-------------------------------------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="flooding_test"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+	simple_if_init $h1 203.0.113.1/24
+}
+
+h1_destroy()
+{
+	simple_if_fini $h1 203.0.113.1/24
+}
+
+switch_create()
+{
+	# Make sure the bridge uses the MAC address of the local port and
+	# not that of the VxLAN's device
+	ip link add dev br0 type bridge mcast_snooping 0
+	ip link set dev br0 address $(mac_get $swp1)
+
+	ip link add name vxlan0 type vxlan id 10 nolearning noudpcsum \
+		ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+	ip address add 198.51.100.1/32 dev lo
+
+	ip link set dev $swp1 master br0
+	ip link set dev vxlan0 master br0
+
+	ip link set dev br0 up
+	ip link set dev $swp1 up
+	ip link set dev vxlan0 up
+}
+
+switch_destroy()
+{
+	ip link set dev vxlan0 down
+	ip link set dev $swp1 down
+	ip link set dev br0 down
+
+	ip link set dev vxlan0 nomaster
+	ip link set dev $swp1 nomaster
+
+	ip address del 198.51.100.1/32 dev lo
+
+	ip link del dev vxlan0
+
+	ip link del dev br0
+}
+
+router1_create()
+{
+	# This router is in the default VRF, where the VxLAN device is
+	# performing the L3 lookup
+	ip link set dev $rp1 up
+	ip address add 192.0.2.1/24 dev $rp1
+	ip route add 198.51.100.0/24 via 192.0.2.2
+}
+
+router1_destroy()
+{
+	ip route del 198.51.100.0/24 via 192.0.2.2
+	ip address del 192.0.2.1/24 dev $rp1
+	ip link set dev $rp1 down
+}
+
+router2_create()
+{
+	# This router is not in the default VRF, so use simple_if_init()
+	simple_if_init $rp2 192.0.2.2/24
+}
+
+router2_destroy()
+{
+	simple_if_fini $rp2 192.0.2.2/24
+}
+
+setup_prepare()
+{
+	h1=${NETIFS[p1]}
+	swp1=${NETIFS[p2]}
+
+	rp1=${NETIFS[p3]}
+	rp2=${NETIFS[p4]}
+
+	vrf_prepare
+
+	h1_create
+
+	switch_create
+
+	router1_create
+	router2_create
+
+	forwarding_enable
+}
+
+cleanup()
+{
+	pre_cleanup
+
+	forwarding_restore
+
+	router2_destroy
+	router1_destroy
+
+	switch_destroy
+
+	h1_destroy
+
+	vrf_cleanup
+}
+
+flooding_remotes_add()
+{
+	local num_remotes=$1
+	local lsb
+	local i
+
+	for i in $(eval echo {1..$num_remotes}); do
+		lsb=$((i + 1))
+
+		bridge fdb append 00:00:00:00:00:00 dev vxlan0 self \
+			dst 198.51.100.$lsb
+	done
+}
+
+flooding_filters_add()
+{
+	local num_remotes=$1
+	local lsb
+	local i
+
+	tc qdisc add dev $rp2 clsact
+
+	for i in $(eval echo {1..$num_remotes}); do
+		lsb=$((i + 1))
+
+		tc filter add dev $rp2 ingress protocol ip pref $i handle $i \
+			flower ip_proto udp dst_ip 198.51.100.$lsb \
+			dst_port 4789 skip_sw action drop
+	done
+}
+
+flooding_filters_del()
+{
+	local num_remotes=$1
+	local i
+
+	for i in $(eval echo {1..$num_remotes}); do
+		tc filter del dev $rp2 ingress protocol ip pref $i \
+			handle $i flower
+	done
+
+	tc qdisc del dev $rp2 clsact
+}
+
+flooding_check_packets()
+{
+	local packets=("$@")
+	local num_remotes=${#packets[@]}
+	local i
+
+	for i in $(eval echo {1..$num_remotes}); do
+		tc_check_packets "dev $rp2 ingress" $i ${packets[i - 1]}
+		check_err $? "remote $i - did not get expected number of packets"
+	done
+}
+
+flooding_test()
+{
+	# Use 12 remote VTEPs that will be stored in 4 records. The array
+	# 'packets' will store how many packets are expected to be received
+	# by each remote VTEP at each stage of the test
+	declare -a packets=(1 1 1 1 1 1 1 1 1 1 1 1)
+	local num_remotes=12
+
+	RET=0
+
+	# Add FDB entries for remote VTEPs and corresponding tc filters on the
+	# ingress of the nexthop router. These filters will count how many
+	# packets were flooded to each remote VTEP
+	flooding_remotes_add $num_remotes
+	flooding_filters_add $num_remotes
+
+	# Send one packet and make sure it is flooded to all the remote VTEPs
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 1 packet"
+
+	# Delete the third record which corresponds to VTEPs with LSB 8..10
+	# and check that packet is flooded correctly when we remove a record
+	# from the middle of the list
+	RET=0
+
+	packets=(2 2 2 2 2 2 1 1 1 2 2 2)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.8
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.9
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.10
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 2 packets"
+
+	# Delete the first record and make sure the packet is flooded correctly
+	RET=0
+
+	packets=(2 2 2 3 3 3 1 1 1 3 3 3)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.2
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.3
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.4
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 3 packets"
+
+	# Delete the last record and make sure the packet is flooded correctly
+	RET=0
+
+	packets=(2 2 2 4 4 4 1 1 1 3 3 3)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.11
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.12
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.13
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 4 packets"
+
+	# Delete the last record, one entry at a time and make sure single
+	# entries are correctly removed
+	RET=0
+
+	packets=(2 2 2 4 5 5 1 1 1 3 3 3)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.5
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 5 packets"
+
+	RET=0
+
+	packets=(2 2 2 4 5 6 1 1 1 3 3 3)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.6
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 6 packets"
+
+	RET=0
+
+	packets=(2 2 2 4 5 6 1 1 1 3 3 3)
+	bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.7
+
+	$MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+	flooding_check_packets "${packets[@]}"
+	log_test "flood after 7 packets"
+
+	flooding_filters_del $num_remotes
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 256d82d..eec35989 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -7,6 +7,7 @@
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
 TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
 TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
+TEST_PROGS += udpgro_bench.sh udpgro.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index cd3a2f1..5821bdd 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -14,3 +14,17 @@
 CONFIG_DUMMY=y
 CONFIG_BRIDGE=y
 CONFIG_VLAN_8021Q=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 85d2535..7af5a03 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -104,7 +104,7 @@
 {
 	local i
 
-	for i in $(eval echo {1..$NUM_NETIFS}); do
+	for ((i = 1; i <= NUM_NETIFS; ++i)); do
 		local j=$((i+1))
 
 		ip link show dev ${NETIFS[p$i]} &> /dev/null
@@ -135,7 +135,7 @@
 	create_netif
 fi
 
-for i in $(eval echo {1..$NUM_NETIFS}); do
+for ((i = 1; i <= NUM_NETIFS; ++i)); do
 	ip link show dev ${NETIFS[p$i]} &> /dev/null
 	if [[ $? -ne 0 ]]; then
 		echo "SKIP: could not find all required interfaces"
@@ -477,11 +477,24 @@
 	ip -j link show dev $if_name | jq -r '.[]["master"]'
 }
 
+link_stats_get()
+{
+	local if_name=$1; shift
+	local dir=$1; shift
+	local stat=$1; shift
+
+	ip -j -s link show dev $if_name \
+		| jq '.[]["stats64"]["'$dir'"]["'$stat'"]'
+}
+
 link_stats_tx_packets_get()
 {
-       local if_name=$1
+	link_stats_get $1 tx packets
+}
 
-       ip -j -s link show dev $if_name | jq '.[]["stats64"]["tx"]["packets"]'
+link_stats_rx_errors_get()
+{
+	link_stats_get $1 rx errors
 }
 
 tc_rule_stats_get()
@@ -783,6 +796,17 @@
 	log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
 }
 
+in_ns()
+{
+	local name=$1; shift
+
+	ip netns exec $name bash <<-EOF
+		NUM_NETIFS=0
+		source lib.sh
+		$(for a in "$@"; do printf "%q${IFS:0:1}" "$a"; done)
+	EOF
+}
+
 ##############################################################################
 # Tests
 
@@ -790,10 +814,11 @@
 {
 	local if_name=$1
 	local dip=$2
+	local args=$3
 	local vrf_name
 
 	vrf_name=$(master_name_get $if_name)
-	ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null
+	ip vrf exec $vrf_name $PING $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
 }
 
 ping_test()
@@ -802,17 +827,18 @@
 
 	ping_do $1 $2
 	check_err $?
-	log_test "ping"
+	log_test "ping$3"
 }
 
 ping6_do()
 {
 	local if_name=$1
 	local dip=$2
+	local args=$3
 	local vrf_name
 
 	vrf_name=$(master_name_get $if_name)
-	ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null
+	ip vrf exec $vrf_name $PING6 $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
 }
 
 ping6_test()
@@ -821,7 +847,7 @@
 
 	ping6_do $1 $2
 	check_err $?
-	log_test "ping6"
+	log_test "ping6$3"
 }
 
 learning_test()
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
new file mode 100755
index 0000000..56cef3b
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -0,0 +1,786 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +--------------------+                               +----------------------+
+# | H1 (vrf)           |                               |             H2 (vrf) |
+# |    + $h1           |                               |  + $h2               |
+# |    | 192.0.2.1/28  |                               |  | 192.0.2.2/28      |
+# +----|---------------+                               +--|-------------------+
+#      |                                                  |
+# +----|--------------------------------------------------|-------------------+
+# | SW |                                                  |                   |
+# | +--|--------------------------------------------------|-----------------+ |
+# | |  + $swp1                   BR1 (802.1d)             + $swp2           | |
+# | |                                                                       | |
+# | |  + vx1 (vxlan)                                                        | |
+# | |    local 192.0.2.17                                                   | |
+# | |    remote 192.0.2.34 192.0.2.50                                       | |
+# | |    id 1000 dstport $VXPORT                                            | |
+# | +-----------------------------------------------------------------------+ |
+# |                                                                           |
+# |  192.0.2.32/28 via 192.0.2.18                                             |
+# |  192.0.2.48/28 via 192.0.2.18                                             |
+# |                                                                           |
+# |    + $rp1                                                                 |
+# |    | 192.0.2.17/28                                                        |
+# +----|----------------------------------------------------------------------+
+#      |
+# +----|--------------------------------------------------------+
+# |    |                                             VRP2 (vrf) |
+# |    + $rp2                                                   |
+# |      192.0.2.18/28                                          |
+# |                                                             |   (maybe) HW
+# =============================================================================
+# |                                                             |  (likely) SW
+# |    + v1 (veth)                             + v3 (veth)      |
+# |    | 192.0.2.33/28                         | 192.0.2.49/28  |
+# +----|---------------------------------------|----------------+
+#      |                                       |
+# +----|------------------------------+   +----|------------------------------+
+# |    + v2 (veth)        NS1 (netns) |   |    + v4 (veth)        NS2 (netns) |
+# |      192.0.2.34/28                |   |      192.0.2.50/28                |
+# |                                   |   |                                   |
+# |   192.0.2.16/28 via 192.0.2.33    |   |   192.0.2.16/28 via 192.0.2.49    |
+# |   192.0.2.50/32 via 192.0.2.33    |   |   192.0.2.34/32 via 192.0.2.49    |
+# |                                   |   |                                   |
+# | +-------------------------------+ |   | +-------------------------------+ |
+# | |                  BR2 (802.1d) | |   | |                  BR2 (802.1d) | |
+# | |  + vx2 (vxlan)                | |   | |  + vx2 (vxlan)                | |
+# | |    local 192.0.2.34           | |   | |    local 192.0.2.50           | |
+# | |    remote 192.0.2.17          | |   | |    remote 192.0.2.17          | |
+# | |    remote 192.0.2.50          | |   | |    remote 192.0.2.34          | |
+# | |    id 1000 dstport $VXPORT    | |   | |    id 1000 dstport $VXPORT    | |
+# | |                               | |   | |                               | |
+# | |  + w1 (veth)                  | |   | |  + w1 (veth)                  | |
+# | +--|----------------------------+ |   | +--|----------------------------+ |
+# |    |                              |   |    |                              |
+# | +--|----------------------------+ |   | +--|----------------------------+ |
+# | |  |                  VW2 (vrf) | |   | |  |                  VW2 (vrf) | |
+# | |  + w2 (veth)                  | |   | |  + w2 (veth)                  | |
+# | |    192.0.2.3/28               | |   | |    192.0.2.4/28               | |
+# | +-------------------------------+ |   | +-------------------------------+ |
+# +-----------------------------------+   +-----------------------------------+
+
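As the diagram shows, vx1 carries no multicast group: flooded traffic is head-end replicated to the two remote VTEPs through all-zero FDB entries. A minimal sketch of inspecting and extending such a flood list by hand, assuming the device name from the diagram (the extra remote address is purely hypothetical):

  # list the current flood (all-zero MAC) entries of vx1
  bridge fdb show dev vx1 | grep '^00:00:00:00:00:00'
  # append one more remote VTEP to the flood list (hypothetical address)
  bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.66 self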
+: ${VXPORT:=4789}
+export VXPORT
+
+: ${ALL_TESTS:="
+	ping_ipv4
+	test_flood
+	test_unicast
+	test_ttl
+	test_tos
+	test_ecn_encap
+	test_ecn_decap
+	reapply_config
+	ping_ipv4
+	test_flood
+	test_unicast
+	test_learning
+    "}
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+	simple_if_init $h1 192.0.2.1/28
+	tc qdisc add dev $h1 clsact
+}
+
+h1_destroy()
+{
+	tc qdisc del dev $h1 clsact
+	simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+	simple_if_init $h2 192.0.2.2/28
+	tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+	tc qdisc del dev $h2 clsact
+	simple_if_fini $h2 192.0.2.2/28
+}
+
+rp1_set_addr()
+{
+	ip address add dev $rp1 192.0.2.17/28
+
+	ip route add 192.0.2.32/28 nexthop via 192.0.2.18
+	ip route add 192.0.2.48/28 nexthop via 192.0.2.18
+}
+
+rp1_unset_addr()
+{
+	ip route del 192.0.2.48/28 nexthop via 192.0.2.18
+	ip route del 192.0.2.32/28 nexthop via 192.0.2.18
+
+	ip address del dev $rp1 192.0.2.17/28
+}
+
+switch_create()
+{
+	ip link add name br1 type bridge vlan_filtering 0 mcast_snooping 0
+	# Make sure the bridge uses the MAC address of the local port and not
+	# that of the VxLAN device.
+	ip link set dev br1 address $(mac_get $swp1)
+	ip link set dev br1 up
+
+	ip link set dev $rp1 up
+	rp1_set_addr
+
+	ip link add name vx1 type vxlan id 1000		\
+		local 192.0.2.17 dstport "$VXPORT"	\
+		nolearning noudpcsum tos inherit ttl 100
+	ip link set dev vx1 up
+
+	ip link set dev vx1 master br1
+	ip link set dev $swp1 master br1
+	ip link set dev $swp1 up
+
+	ip link set dev $swp2 master br1
+	ip link set dev $swp2 up
+
+	bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+	bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+}
+
+switch_destroy()
+{
+	rp1_unset_addr
+	ip link set dev $rp1 down
+
+	bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+	bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+
+	ip link set dev vx1 nomaster
+	ip link set dev vx1 down
+	ip link del dev vx1
+
+	ip link set dev $swp2 down
+	ip link set dev $swp2 nomaster
+
+	ip link set dev $swp1 down
+	ip link set dev $swp1 nomaster
+
+	ip link set dev br1 down
+	ip link del dev br1
+}
+
+vrp2_create()
+{
+	simple_if_init $rp2 192.0.2.18/28
+	__simple_if_init v1 v$rp2 192.0.2.33/28
+	__simple_if_init v3 v$rp2 192.0.2.49/28
+	tc qdisc add dev v1 clsact
+}
+
+vrp2_destroy()
+{
+	tc qdisc del dev v1 clsact
+	__simple_if_fini v3 192.0.2.49/28
+	__simple_if_fini v1 192.0.2.33/28
+	simple_if_fini $rp2 192.0.2.18/28
+}
+
+ns_init_common()
+{
+	local in_if=$1; shift
+	local in_addr=$1; shift
+	local other_in_addr=$1; shift
+	local nh_addr=$1; shift
+	local host_addr=$1; shift
+
+	ip link set dev $in_if up
+	ip address add dev $in_if $in_addr/28
+	tc qdisc add dev $in_if clsact
+
+	ip link add name br2 type bridge vlan_filtering 0
+	ip link set dev br2 up
+
+	ip link add name w1 type veth peer name w2
+
+	ip link set dev w1 master br2
+	ip link set dev w1 up
+
+	ip link add name vx2 type vxlan id 1000 local $in_addr dstport "$VXPORT"
+	ip link set dev vx2 up
+	bridge fdb append dev vx2 00:00:00:00:00:00 dst 192.0.2.17 self
+	bridge fdb append dev vx2 00:00:00:00:00:00 dst $other_in_addr self
+
+	ip link set dev vx2 master br2
+	tc qdisc add dev vx2 clsact
+
+	simple_if_init w2 $host_addr/28
+
+	ip route add 192.0.2.16/28 nexthop via $nh_addr
+	ip route add $other_in_addr/32 nexthop via $nh_addr
+}
+export -f ns_init_common
+
+ns1_create()
+{
+	ip netns add ns1
+	ip link set dev v2 netns ns1
+	in_ns ns1 \
+	      ns_init_common v2 192.0.2.34 192.0.2.50 192.0.2.33 192.0.2.3
+}
+
+ns1_destroy()
+{
+	ip netns exec ns1 ip link set dev v2 netns 1
+	ip netns del ns1
+}
+
+ns2_create()
+{
+	ip netns add ns2
+	ip link set dev v4 netns ns2
+	in_ns ns2 \
+	      ns_init_common v4 192.0.2.50 192.0.2.34 192.0.2.49 192.0.2.4
+}
+
+ns2_destroy()
+{
+	ip netns exec ns2 ip link set dev v4 netns 1
+	ip netns del ns2
+}
+
+setup_prepare()
+{
+	h1=${NETIFS[p1]}
+	swp1=${NETIFS[p2]}
+
+	swp2=${NETIFS[p3]}
+	h2=${NETIFS[p4]}
+
+	rp1=${NETIFS[p5]}
+	rp2=${NETIFS[p6]}
+
+	vrf_prepare
+	forwarding_enable
+
+	h1_create
+	h2_create
+	switch_create
+
+	ip link add name v1 type veth peer name v2
+	ip link add name v3 type veth peer name v4
+	vrp2_create
+	ns1_create
+	ns2_create
+
+	r1_mac=$(in_ns ns1 mac_get w2)
+	r2_mac=$(in_ns ns2 mac_get w2)
+	h2_mac=$(mac_get $h2)
+}
+
+cleanup()
+{
+	pre_cleanup
+
+	ns2_destroy
+	ns1_destroy
+	vrp2_destroy
+	ip link del dev v3
+	ip link del dev v1
+
+	switch_destroy
+	h2_destroy
+	h1_destroy
+
+	forwarding_restore
+	vrf_cleanup
+}
+
+# For the first round of tests, vx1 is the first device to get attached to the
+# bridge, and at that point the local IP is already configured. Try the other
+# scenario of attaching the device to an already-offloaded bridge, and only
+# then attach the local IP.
+reapply_config()
+{
+	echo "Reapplying configuration"
+
+	bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+	bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+	rp1_unset_addr
+	ip link set dev vx1 nomaster
+	sleep 5
+
+	ip link set dev vx1 master br1
+	bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+	bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+	sleep 1
+	rp1_set_addr
+	sleep 5
+}
+
+ping_ipv4()
+{
+	ping_test $h1 192.0.2.2 ": local->local"
+	ping_test $h1 192.0.2.3 ": local->remote 1"
+	ping_test $h1 192.0.2.4 ": local->remote 2"
+}
+
+maybe_in_ns()
+{
+	echo ${1:+in_ns} $1
+}
+
+__flood_counter_add_del()
+{
+	local add_del=$1; shift
+	local dev=$1; shift
+	local ns=$1; shift
+
+	# Installing the ICMP capture both in HW and in SW would end up
+	# double-counting the packets that are trapped to the slow path, such as
+	# for the unicast test. Adding either skip_hw or skip_sw fixes this
+	# problem, but with skip_hw the flooded packets are not counted at all,
+	# because they are dropped due to MAC address mismatch; and skip_sw is a
+	# no-go for veth-based topologies.
+	#
+	# So try to install with skip_sw and fall back to skip_hw if that fails.
+
+	$(maybe_in_ns $ns) __icmp_capture_add_del          \
+			   $add_del 100 "" $dev skip_sw 2>/dev/null || \
+	$(maybe_in_ns $ns) __icmp_capture_add_del          \
+			   $add_del 100 "" $dev skip_hw
+}
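For reference, the helper call above boils down to a pair of tc commands roughly like the following; this is only a sketch, since __icmp_capture_add_del lives in lib.sh and the exact flower match it installs is assumed here:

  dev=$h1    # any capture device; $h1 is used purely as an example
  tc filter add dev $dev ingress pref 100 prot ip \
          flower skip_sw ip_proto icmp action pass 2>/dev/null || \
  tc filter add dev $dev ingress pref 100 prot ip \
          flower skip_hw ip_proto icmp action pass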
+
+flood_counter_install()
+{
+	__flood_counter_add_del add "$@"
+}
+
+flood_counter_uninstall()
+{
+	__flood_counter_add_del del "$@"
+}
+
+flood_fetch_stat()
+{
+	local dev=$1; shift
+	local ns=$1; shift
+
+	$(maybe_in_ns $ns) tc_rule_stats_get $dev 100 ingress
+}
+
+flood_fetch_stats()
+{
+	local counters=("${@}")
+	local counter
+
+	for counter in "${counters[@]}"; do
+		flood_fetch_stat $counter
+	done
+}
+
+vxlan_flood_test()
+{
+	local mac=$1; shift
+	local dst=$1; shift
+	local -a expects=("${@}")
+
+	local -a counters=($h2 "vx2 ns1" "vx2 ns2")
+	local counter
+	local key
+
+	for counter in "${counters[@]}"; do
+		flood_counter_install $counter
+	done
+
+	local -a t0s=($(flood_fetch_stats "${counters[@]}"))
+	$MZ $h1 -c 10 -d 100msec -p 64 -b $mac -B $dst -t icmp -q
+	sleep 1
+	local -a t1s=($(flood_fetch_stats "${counters[@]}"))
+
+	for key in ${!t0s[@]}; do
+		local delta=$((t1s[$key] - t0s[$key]))
+		local expect=${expects[$key]}
+
+		((expect == delta))
+		check_err $? "${counters[$key]}: Expected to capture $expect packets, got $delta."
+	done
+
+	for counter in "${counters[@]}"; do
+		flood_counter_uninstall $counter
+	done
+}
+
+__test_flood()
+{
+	local mac=$1; shift
+	local dst=$1; shift
+	local what=$1; shift
+
+	RET=0
+
+	vxlan_flood_test $mac $dst 10 10 10
+
+	log_test "VXLAN: $what"
+}
+
+test_flood()
+{
+	__test_flood de:ad:be:ef:13:37 192.0.2.100 "flood"
+}
+
+vxlan_fdb_add_del()
+{
+	local add_del=$1; shift
+	local mac=$1; shift
+	local dev=$1; shift
+	local dst=$1; shift
+
+	bridge fdb $add_del dev $dev $mac self static permanent \
+		${dst:+dst} $dst 2>/dev/null
+	bridge fdb $add_del dev $dev $mac master static 2>/dev/null
+}
+
+__test_unicast()
+{
+	local mac=$1; shift
+	local dst=$1; shift
+	local hit_idx=$1; shift
+	local what=$1; shift
+
+	RET=0
+
+	local -a expects=(0 0 0)
+	expects[$hit_idx]=10
+
+	vxlan_flood_test $mac $dst "${expects[@]}"
+
+	log_test "VXLAN: $what"
+}
+
+test_unicast()
+{
+	local -a targets=("$h2_mac $h2"
+			  "$r1_mac vx1 192.0.2.34"
+			  "$r2_mac vx1 192.0.2.50")
+	local target
+
+	for target in "${targets[@]}"; do
+		vxlan_fdb_add_del add $target
+	done
+
+	__test_unicast $h2_mac 192.0.2.2 0 "local MAC unicast"
+	__test_unicast $r1_mac 192.0.2.3 1 "remote MAC 1 unicast"
+	__test_unicast $r2_mac 192.0.2.4 2 "remote MAC 2 unicast"
+
+	for target in "${targets[@]}"; do
+		vxlan_fdb_add_del del $target
+	done
+}
+
+vxlan_ping_test()
+{
+	local ping_dev=$1; shift
+	local ping_dip=$1; shift
+	local ping_args=$1; shift
+	local capture_dev=$1; shift
+	local capture_dir=$1; shift
+	local capture_pref=$1; shift
+	local expect=$1; shift
+
+	local t0=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+	ping_do $ping_dev $ping_dip "$ping_args"
+	local t1=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+	local delta=$((t1 - t0))
+
+	# Tolerate a couple of stray extra packets.
+	((expect <= delta && delta <= expect + 2))
+	check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
+}
+
+test_ttl()
+{
+	RET=0
+
+	tc filter add dev v1 egress pref 77 prot ip \
+		flower ip_ttl 99 action pass
+	vxlan_ping_test $h1 192.0.2.3 "" v1 egress 77 10
+	tc filter del dev v1 egress pref 77 prot ip
+
+	log_test "VXLAN: envelope TTL"
+}
+
+test_tos()
+{
+	RET=0
+
+	tc filter add dev v1 egress pref 77 prot ip \
+		flower ip_tos 0x40 action pass
+	vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
+	vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
+	tc filter del dev v1 egress pref 77 prot ip
+
+	log_test "VXLAN: envelope TOS inheritance"
+}
+
+__test_ecn_encap()
+{
+	local q=$1; shift
+	local tos=$1; shift
+
+	RET=0
+
+	tc filter add dev v1 egress pref 77 prot ip \
+		flower ip_tos $tos action pass
+	sleep 1
+	vxlan_ping_test $h1 192.0.2.3 "-Q $q" v1 egress 77 10
+	tc filter del dev v1 egress pref 77 prot ip
+
+	log_test "VXLAN: ECN encap: $q->$tos"
+}
+
+test_ecn_encap()
+{
+	# In accordance with INET_ECN_encapsulate()
+	__test_ecn_encap 0x00 0x00
+	__test_ecn_encap 0x01 0x01
+	__test_ecn_encap 0x02 0x02
+	__test_ecn_encap 0x03 0x02
+}
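The four cases above encode the usual tunnel ECN encapsulation rule: the outer ECN field copies the inner one, except that CE (0x03) is encapsulated as ECT(0) (0x02). A rough shell sketch of just that mapping (function name is illustrative):

  ecn_encap()
  {
          local inner_ecn=$1

          if [ "$inner_ecn" = "0x03" ]; then
                  echo 0x02          # CE is encapsulated as ECT(0)
          else
                  echo "$inner_ecn"  # other codepoints are copied as-is
          fi
  }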
+
+vxlan_encapped_ping_do()
+{
+	local count=$1; shift
+	local dev=$1; shift
+	local next_hop_mac=$1; shift
+	local dest_ip=$1; shift
+	local dest_mac=$1; shift
+	local inner_tos=$1; shift
+	local outer_tos=$1; shift
+
+	$MZ $dev -c $count -d 100msec -q \
+		-b $next_hop_mac -B $dest_ip \
+		-t udp tos=$outer_tos,sp=23456,dp=$VXPORT,p=$(:
+		    )"08:"$(                      : VXLAN flags
+		    )"00:00:00:"$(                : VXLAN reserved
+		    )"00:03:e8:"$(                : VXLAN VNI
+		    )"00:"$(                      : VXLAN reserved
+		    )"$dest_mac:"$(               : ETH daddr
+		    )"$(mac_get w2):"$(           : ETH saddr
+		    )"08:00:"$(                   : ETH type
+		    )"45:"$(                      : IP version + IHL
+		    )"$inner_tos:"$(              : IP TOS
+		    )"00:54:"$(                   : IP total length
+		    )"99:83:"$(                   : IP identification
+		    )"40:00:"$(                   : IP flags + frag off
+		    )"40:"$(                      : IP TTL
+		    )"01:"$(                      : IP proto
+		    )"00:00:"$(                   : IP header csum
+		    )"c0:00:02:03:"$(             : IP saddr: 192.0.2.3
+		    )"c0:00:02:01:"$(             : IP daddr: 192.0.2.1
+		    )"08:"$(                      : ICMP type
+		    )"00:"$(                      : ICMP code
+		    )"8b:f2:"$(                   : ICMP csum
+		    )"1f:6a:"$(                   : ICMP request identifier
+		    )"00:01:"$(                   : ICMP request sequence number
+		    )"4f:ff:c5:5b:00:00:00:00:"$( : ICMP payload
+		    )"6d:74:0b:00:00:00:00:00:"$( :
+		    )"10:11:12:13:14:15:16:17:"$( :
+		    )"18:19:1a:1b:1c:1d:1e:1f:"$( :
+		    )"20:21:22:23:24:25:26:27:"$( :
+		    )"28:29:2a:2b:2c:2d:2e:2f:"$( :
+		    )"30:31:32:33:34:35:36:37"
+}
+export -f vxlan_encapped_ping_do
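In the hand-built VXLAN header above, the three VNI bytes "00:03:e8" encode VNI 1000 (0x0003e8), matching the id of vx1/vx2. If the test is adapted to a different VNI, the byte string can be derived with, for example:

  printf '%06x\n' 1000 | sed 's/../&:/g; s/:$//'    # -> 00:03:e8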
+
+vxlan_encapped_ping_test()
+{
+	local ping_dev=$1; shift
+	local nh_dev=$1; shift
+	local ping_dip=$1; shift
+	local inner_tos=$1; shift
+	local outer_tos=$1; shift
+	local stat_get=$1; shift
+	local expect=$1; shift
+
+	local t0=$($stat_get)
+
+	in_ns ns1 \
+		vxlan_encapped_ping_do 10 $ping_dev $(mac_get $nh_dev) \
+			$ping_dip $(mac_get $h1) \
+			$inner_tos $outer_tos
+
+	local t1=$($stat_get)
+	local delta=$((t1 - t0))
+
+	# Tolerate a couple of stray extra packets.
+	((expect <= delta && delta <= expect + 2))
+	check_err $? "Expected to capture $expect packets, got $delta."
+}
+export -f vxlan_encapped_ping_test
+
+__test_ecn_decap()
+{
+	local orig_inner_tos=$1; shift
+	local orig_outer_tos=$1; shift
+	local decapped_tos=$1; shift
+
+	RET=0
+
+	tc filter add dev $h1 ingress pref 77 prot ip \
+		flower ip_tos $decapped_tos action pass
+	sleep 1
+	vxlan_encapped_ping_test v2 v1 192.0.2.17 \
+				 $orig_inner_tos $orig_outer_tos \
+				 "tc_rule_stats_get $h1 77 ingress" 10
+	tc filter del dev $h1 ingress pref 77
+
+	log_test "VXLAN: ECN decap: $orig_outer_tos/$orig_inner_tos->$decapped_tos"
+}
+
+test_ecn_decap_error()
+{
+	local orig_inner_tos=00
+	local orig_outer_tos=03
+
+	RET=0
+
+	vxlan_encapped_ping_test v2 v1 192.0.2.17 \
+				 $orig_inner_tos $orig_outer_tos \
+				 "link_stats_rx_errors_get vx1" 10
+
+	log_test "VXLAN: ECN decap: $orig_outer_tos/$orig_inner_tos->error"
+}
+
+test_ecn_decap()
+{
+	# In accordance with INET_ECN_decapsulate()
+	__test_ecn_decap 00 00 0x00
+	__test_ecn_decap 01 01 0x01
+	__test_ecn_decap 02 01 0x02
+	__test_ecn_decap 01 03 0x03
+	__test_ecn_decap 02 03 0x03
+	test_ecn_decap_error
+}
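These cases mirror the tunnel ECN decapsulation rule for the combinations exercised here: an outer CE mark is propagated to an ECN-capable inner packet, while CE on a Not-ECT inner packet causes the frame to be dropped (surfacing as an rx error on vx1, which is what test_ecn_decap_error counts). A rough sketch, names illustrative, covering only these combinations:

  ecn_decap()
  {
          local outer_ecn=$1; shift
          local inner_ecn=$1; shift

          if [ "$outer_ecn" != "0x03" ]; then
                  echo "$inner_ecn"   # inner ECN field is left alone
          elif [ "$inner_ecn" = "0x00" ]; then
                  echo drop           # CE outer, Not-ECT inner: drop
          else
                  echo 0x03           # propagate CE to the inner packet
          fi
  }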
+
+test_learning()
+{
+	local mac=de:ad:be:ef:13:37
+	local dst=192.0.2.100
+
+	# Enable learning on the VxLAN device and set the ageing time on both
+	# the bridge and the VxLAN device to 10 seconds
+	ip link set dev br1 type bridge ageing_time 1000
+	ip link set dev vx1 type vxlan ageing 10
+	ip link set dev vx1 type vxlan learning
+	reapply_config
+
+	# Check that flooding works
+	RET=0
+
+	vxlan_flood_test $mac $dst 10 10 10
+
+	log_test "VXLAN: flood before learning"
+
+	# Send a packet with source mac set to $mac from host w2 and check that
+	# corresponding entries are created in the FDBs of both the VxLAN device
+	# vx1 and the bridge
+	RET=0
+
+	in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+		-t icmp -q
+	sleep 1
+
+	bridge fdb show brport vx1 | grep $mac | grep -q self
+	check_err $?
+	bridge fdb show brport vx1 | grep $mac | grep -q -v self
+	check_err $?
+
+	log_test "VXLAN: show learned FDB entry"
+
+	# Repeat first test and check that packets only reach host w2 in ns1
+	RET=0
+
+	vxlan_flood_test $mac $dst 0 10 0
+
+	log_test "VXLAN: learned FDB entry"
+
+	# Delete the learned FDB entry from the VxLAN and bridge devices and
+	# check that packets are flooded
+	RET=0
+
+	bridge fdb del dev vx1 $mac master self
+	sleep 1
+
+	vxlan_flood_test $mac $dst 10 10 10
+
+	log_test "VXLAN: deletion of learned FDB entry"
+
+	# Re-learn the first FDB entry and check that it is correctly aged out
+	RET=0
+
+	in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+		-t icmp -q
+	sleep 1
+
+	bridge fdb show brport vx1 | grep $mac | grep -q self
+	check_err $?
+	bridge fdb show brport vx1 | grep $mac | grep -q -v self
+	check_err $?
+
+	vxlan_flood_test $mac $dst 0 10 0
+
+	sleep 20
+
+	bridge fdb show brport vx1 | grep $mac | grep -q self
+	check_fail $?
+	bridge fdb show brport vx1 | grep $mac | grep -q -v self
+	check_fail $?
+
+	vxlan_flood_test $mac $dst 10 10 10
+
+	log_test "VXLAN: Ageing of learned FDB entry"
+
+	# Toggle learning on the bridge port and check that the bridge's FDB
+	# is populated only when it should be
+	RET=0
+
+	ip link set dev vx1 type bridge_slave learning off
+
+	in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+		-t icmp -q
+	sleep 1
+
+	bridge fdb show brport vx1 | grep $mac | grep -q -v self
+	check_fail $?
+
+	ip link set dev vx1 type bridge_slave learning on
+
+	in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+		-t icmp -q
+	sleep 1
+
+	bridge fdb show brport vx1 | grep $mac | grep -q -v self
+	check_err $?
+
+	log_test "VXLAN: learning toggling on bridge port"
+
+	# Restore previous settings
+	ip link set dev vx1 type vxlan nolearning
+	ip link set dev vx1 type vxlan ageing 300
+	ip link set dev br1 type bridge ageing_time 30000
+	reapply_config
+}
+
+test_all()
+{
+	echo "Running tests with UDP port $VXPORT"
+	tests_run
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+test_all
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh
new file mode 100755
index 0000000..3bf3da6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# A wrapper to run VXLAN tests with an unusual port number.
+
+VXPORT=8472
+ALL_TESTS="
+	ping_ipv4
+"
+source vxlan_bridge_1d.sh
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index a369d61..e2c94e4 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -26,6 +26,47 @@
 # - pmtu_ipv6
 #	Same as pmtu_ipv4, except for locked PMTU tests, using IPv6
 #
+# - pmtu_ipv4_vxlan4_exception
+#	Set up the same network topology as pmtu_ipv4, create a VXLAN tunnel
+#	over IPv4 between A and B, routed via R1. On the link between R1 and B,
+#	set an MTU lower than the VXLAN MTU and the MTU on the link between A and
+#	R1. Send IPv4 packets, exceeding the MTU between R1 and B, over VXLAN
+#	from A to B and check that the PMTU exception is created with the right
+#	value on A
+#
+# - pmtu_ipv6_vxlan4_exception
+#	Same as pmtu_ipv4_vxlan4_exception, but send IPv6 packets from A to B
+#
+# - pmtu_ipv4_vxlan6_exception
+#	Same as pmtu_ipv4_vxlan4_exception, but use IPv6 transport from A to B
+#
+# - pmtu_ipv6_vxlan6_exception
+#	Same as pmtu_ipv4_vxlan6_exception, but send IPv6 packets from A to B
+#
+# - pmtu_ipv4_geneve4_exception
+#	Same as pmtu_ipv4_vxlan4_exception, but using a GENEVE tunnel instead of
+#	VXLAN
+#
+# - pmtu_ipv6_geneve4_exception
+#	Same as pmtu_ipv6_vxlan4_exception, but using a GENEVE tunnel instead of
+#	VXLAN
+#
+# - pmtu_ipv4_geneve6_exception
+#	Same as pmtu_ipv4_vxlan6_exception, but using a GENEVE tunnel instead of
+#	VXLAN
+#
+# - pmtu_ipv6_geneve6_exception
+#	Same as pmtu_ipv6_vxlan6_exception, but using a GENEVE tunnel instead of
+#	VXLAN
+#
+# - pmtu_ipv{4,6}_fou{4,6}_exception
+#	Same as pmtu_ipv4_vxlan4_exception, but using a direct IPv4/IPv6
+#	encapsulation (FoU) over IPv4/IPv6, instead of VXLAN
+#
+# - pmtu_ipv{4,6}_gue{4,6}_exception
+#	Same as pmtu_ipv4_vxlan4_exception, but using a generic UDP IPv4/IPv6
+#	encapsulation (GUE) over IPv4/IPv6, instead of VXLAN
+#
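Each of the *_exception tests above verifies the result by reading the PMTU back from the route exception created on A. Outside the harness, roughly the same check can be done by hand; the namespace name below is a placeholder, and the destination is the tunnel address of B:

  ip netns exec <ns_a> ip route get 192.168.2.2   # look for the "mtu NNNN" attribute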
 # - pmtu_vti4_exception
 #	Set up vti tunnel on top of veth, with xfrm states and policies, in two
 #	namespaces with matching endpoints. Check that route exception is not
@@ -72,6 +113,22 @@
 tests="
 	pmtu_ipv4_exception		ipv4: PMTU exceptions
 	pmtu_ipv6_exception		ipv6: PMTU exceptions
+	pmtu_ipv4_vxlan4_exception	IPv4 over vxlan4: PMTU exceptions
+	pmtu_ipv6_vxlan4_exception	IPv6 over vxlan4: PMTU exceptions
+	pmtu_ipv4_vxlan6_exception	IPv4 over vxlan6: PMTU exceptions
+	pmtu_ipv6_vxlan6_exception	IPv6 over vxlan6: PMTU exceptions
+	pmtu_ipv4_geneve4_exception	IPv4 over geneve4: PMTU exceptions
+	pmtu_ipv6_geneve4_exception	IPv6 over geneve4: PMTU exceptions
+	pmtu_ipv4_geneve6_exception	IPv4 over geneve6: PMTU exceptions
+	pmtu_ipv6_geneve6_exception	IPv6 over geneve6: PMTU exceptions
+	pmtu_ipv4_fou4_exception	IPv4 over fou4: PMTU exceptions
+	pmtu_ipv6_fou4_exception	IPv6 over fou4: PMTU exceptions
+	pmtu_ipv4_fou6_exception	IPv4 over fou6: PMTU exceptions
+	pmtu_ipv6_fou6_exception	IPv6 over fou6: PMTU exceptions
+	pmtu_ipv4_gue4_exception	IPv4 over gue4: PMTU exceptions
+	pmtu_ipv6_gue4_exception	IPv6 over gue4: PMTU exceptions
+	pmtu_ipv4_gue6_exception	IPv4 over gue6: PMTU exceptions
+	pmtu_ipv6_gue6_exception	IPv6 over gue6: PMTU exceptions
 	pmtu_vti6_exception		vti6: PMTU exceptions
 	pmtu_vti4_exception		vti4: PMTU exceptions
 	pmtu_vti4_default_mtu		vti4: default MTU assignment
@@ -95,8 +152,8 @@
 # Addresses are:
 # - IPv4: PREFIX4.SEGMENT.ID (/24)
 # - IPv6: PREFIX6:SEGMENT::ID (/64)
-prefix4="192.168"
-prefix6="fd00"
+prefix4="10.0"
+prefix6="fc00"
 a_r1=1
 a_r2=2
 b_r1=3
@@ -129,12 +186,12 @@
 veth6_b_addr="fd00:1::b"
 veth6_mask="64"
 
-vti4_a_addr="192.168.2.1"
-vti4_b_addr="192.168.2.2"
-vti4_mask="24"
-vti6_a_addr="fd00:2::a"
-vti6_b_addr="fd00:2::b"
-vti6_mask="64"
+tunnel4_a_addr="192.168.2.1"
+tunnel4_b_addr="192.168.2.2"
+tunnel4_mask="24"
+tunnel6_a_addr="fd00:2::a"
+tunnel6_b_addr="fd00:2::b"
+tunnel6_mask="64"
 
 dummy6_0_addr="fc00:1000::0"
 dummy6_1_addr="fc00:1001::0"
@@ -159,6 +216,89 @@
 	eval echo \$NS_$1
 }
 
+setup_fou_or_gue() {
+	outer="${1}"
+	inner="${2}"
+	encap="${3}"
+
+	if [ "${outer}" = "4" ]; then
+		modprobe fou || return 2
+		a_addr="${prefix4}.${a_r1}.1"
+		b_addr="${prefix4}.${b_r1}.1"
+		if [ "${inner}" = "4" ]; then
+			type="ipip"
+			ipproto="4"
+		else
+			type="sit"
+			ipproto="41"
+		fi
+	else
+		modprobe fou6 || return 2
+		a_addr="${prefix6}:${a_r1}::1"
+		b_addr="${prefix6}:${b_r1}::1"
+		if [ "${inner}" = "4" ]; then
+			type="ip6tnl"
+			mode="mode ipip6"
+			ipproto="4 -6"
+		else
+			type="ip6tnl"
+			mode="mode ip6ip6"
+			ipproto="41 -6"
+		fi
+	fi
+
+	${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
+	${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
+
+	${ns_b} ip fou add port 5556 ipproto ${ipproto}
+	${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
+
+	if [ "${inner}" = "4" ]; then
+		${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
+		${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
+	else
+		${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
+		${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
+	fi
+
+	${ns_a} ip link set ${encap}_a up
+	${ns_b} ip link set ${encap}_b up
+
+	sleep 1
+}
+
+setup_fou44() {
+	setup_fou_or_gue 4 4 fou
+}
+
+setup_fou46() {
+	setup_fou_or_gue 4 6 fou
+}
+
+setup_fou64() {
+	setup_fou_or_gue 6 4 fou
+}
+
+setup_fou66() {
+	setup_fou_or_gue 6 6 fou
+}
+
+setup_gue44() {
+	setup_fou_or_gue 4 4 gue
+}
+
+setup_gue46() {
+	setup_fou_or_gue 4 6 gue
+}
+
+setup_gue64() {
+	setup_fou_or_gue 6 4 gue
+}
+
+setup_gue66() {
+	setup_fou_or_gue 6 6 gue
+}
+
 setup_namespaces() {
 	for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
 		ip netns add ${n} || return 1
@@ -202,11 +342,57 @@
 }
 
 setup_vti4() {
-	setup_vti 4 ${veth4_a_addr} ${veth4_b_addr} ${vti4_a_addr} ${vti4_b_addr} ${vti4_mask}
+	setup_vti 4 ${veth4_a_addr} ${veth4_b_addr} ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask}
 }
 
 setup_vti6() {
-	setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${vti6_a_addr} ${vti6_b_addr} ${vti6_mask}
+	setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
+}
+
+setup_vxlan_or_geneve() {
+	type="${1}"
+	a_addr="${2}"
+	b_addr="${3}"
+	opts="${4}"
+
+	if [ "${type}" = "vxlan" ]; then
+		opts="${opts} ttl 64 dstport 4789"
+		opts_a="local ${a_addr}"
+		opts_b="local ${b_addr}"
+	else
+		opts_a=""
+		opts_b=""
+	fi
+
+	${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
+	${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
+
+	${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
+	${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
+
+	${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
+	${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+
+	${ns_a} ip link set ${type}_a up
+	${ns_b} ip link set ${type}_b up
+
+	sleep 1
+}
+
+setup_geneve4() {
+	setup_vxlan_or_geneve geneve ${prefix4}.${a_r1}.1  ${prefix4}.${b_r1}.1  "df set"
+}
+
+setup_vxlan4() {
+	setup_vxlan_or_geneve vxlan  ${prefix4}.${a_r1}.1  ${prefix4}.${b_r1}.1  "df set"
+}
+
+setup_geneve6() {
+	setup_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
+}
+
+setup_vxlan6() {
+	setup_vxlan_or_geneve vxlan  ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
 }
 
 setup_xfrm() {
@@ -465,6 +651,161 @@
 	test_pmtu_ipvX 6
 }
 
+test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
+	type=${1}
+	family=${2}
+	outer_family=${3}
+	ll_mtu=4000
+
+	if [ ${outer_family} -eq 4 ]; then
+		setup namespaces routing ${type}4 || return 2
+		#                      IPv4 header   UDP header   VXLAN/GENEVE header   Ethernet header
+		exp_mtu=$((${ll_mtu} - 20          - 8          - 8                   - 14))
+	else
+		setup namespaces routing ${type}6 || return 2
+		#                      IPv6 header   UDP header   VXLAN/GENEVE header   Ethernet header
+		exp_mtu=$((${ll_mtu} - 40          - 8          - 8                   - 14))
+	fi
+
+	trace "${ns_a}" ${type}_a    "${ns_b}"  ${type}_b \
+	      "${ns_a}" veth_A-R1    "${ns_r1}" veth_R1-A \
+	      "${ns_b}" veth_B-R1    "${ns_r1}" veth_R1-B
+
+	if [ ${family} -eq 4 ]; then
+		ping=ping
+		dst=${tunnel4_b_addr}
+	else
+		ping=${ping6}
+		dst=${tunnel6_b_addr}
+	fi
+
+	# Create route exception by exceeding link layer MTU
+	mtu "${ns_a}"  veth_A-R1 $((${ll_mtu} + 1000))
+	mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+	mtu "${ns_b}"  veth_B-R1 ${ll_mtu}
+	mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+	mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
+	mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
+	${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+
+	# Check that exception was created
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
+	check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on ${type} interface"
+}
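For reference, with ll_mtu=4000 the expected values above work out to:

  # IPv4 underlay: 4000 - 20 - 8 - 8 - 14 = 3950
  # IPv6 underlay: 4000 - 40 - 8 - 8 - 14 = 3930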
+
+test_pmtu_ipv4_vxlan4_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan  4 4
+}
+
+test_pmtu_ipv6_vxlan4_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan  6 4
+}
+
+test_pmtu_ipv4_geneve4_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 4 4
+}
+
+test_pmtu_ipv6_geneve4_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 6 4
+}
+
+test_pmtu_ipv4_vxlan6_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan  4 6
+}
+
+test_pmtu_ipv6_vxlan6_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan  6 6
+}
+
+test_pmtu_ipv4_geneve6_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 4 6
+}
+
+test_pmtu_ipv6_geneve6_exception() {
+	test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 6 6
+}
+
+test_pmtu_ipvX_over_fouY_or_gueY() {
+	inner_family=${1}
+	outer_family=${2}
+	encap=${3}
+	ll_mtu=4000
+
+	setup namespaces routing ${encap}${outer_family}${inner_family} || return 2
+	trace "${ns_a}" ${encap}_a   "${ns_b}"  ${encap}_b \
+	      "${ns_a}" veth_A-R1    "${ns_r1}" veth_R1-A \
+	      "${ns_b}" veth_B-R1    "${ns_r1}" veth_R1-B
+
+	if [ ${inner_family} -eq 4 ]; then
+		ping=ping
+		dst=${tunnel4_b_addr}
+	else
+		ping=${ping6}
+		dst=${tunnel6_b_addr}
+	fi
+
+	if [ "${encap}" = "gue" ]; then
+		encap_overhead=4
+	else
+		encap_overhead=0
+	fi
+
+	if [ ${outer_family} -eq 4 ]; then
+		#                      IPv4 header   UDP header
+		exp_mtu=$((${ll_mtu} - 20          - 8         - ${encap_overhead}))
+	else
+		#                      IPv6 header   Option 4   UDP header
+		exp_mtu=$((${ll_mtu} - 40          - 8        - 8       - ${encap_overhead}))
+	fi
+
+	# Create route exception by exceeding link layer MTU
+	mtu "${ns_a}"  veth_A-R1 $((${ll_mtu} + 1000))
+	mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+	mtu "${ns_b}"  veth_B-R1 ${ll_mtu}
+	mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+	mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+	mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+	${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+
+	# Check that exception was created
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
+	check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on ${encap} interface"
+}
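Again for reference, with ll_mtu=4000 the expected values are:

  # FoU over IPv4: 4000 - 20 - 8     = 3972   (GUE: 4 bytes more overhead, 3968)
  # FoU over IPv6: 4000 - 40 - 8 - 8 = 3944   (GUE: 3940)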
+
+test_pmtu_ipv4_fou4_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 4 4 fou
+}
+
+test_pmtu_ipv6_fou4_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 6 4 fou
+}
+
+test_pmtu_ipv4_fou6_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 4 6 fou
+}
+
+test_pmtu_ipv6_fou6_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 6 6 fou
+}
+
+test_pmtu_ipv4_gue4_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 4 4 gue
+}
+
+test_pmtu_ipv6_gue4_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 6 4 gue
+}
+
+test_pmtu_ipv4_gue6_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 4 6 gue
+}
+
+test_pmtu_ipv6_gue6_exception() {
+	test_pmtu_ipvX_over_fouY_or_gueY 6 6 gue
+}
+
 test_pmtu_vti4_exception() {
 	setup namespaces veth vti4 xfrm4 || return 2
 	trace "${ns_a}" veth_a    "${ns_b}" veth_b \
@@ -484,14 +825,14 @@
 
 	# Send DF packet without exceeding link layer MTU, check that no
 	# exception is created
-	${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null
-	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
+	${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
 	check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
 
 	# Now exceed link layer MTU by one byte, check that exception is created
 	# with the right PMTU value
-	${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null
-	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
+	${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
 	check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
 }
 
@@ -506,20 +847,20 @@
 	mtu "${ns_b}" veth_b 4000
 	mtu "${ns_a}" vti6_a 5000
 	mtu "${ns_b}" vti6_b 5000
-	${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+	${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
 
 	# Check that exception was created
-	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
 	check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
 
 	# Decrease tunnel MTU, check for PMTU decrease in route exception
 	mtu "${ns_a}" vti6_a 3000
-	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
 	check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
 
 	# Increase tunnel MTU, check for PMTU increase in route exception
 	mtu "${ns_a}" vti6_a 9000
-	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+	pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
 	check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
 
 	return ${fail}
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
new file mode 100755
index 0000000..aeac53a
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro functional tests.
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+	local -r jobs="$(jobs -p)"
+	local -r ns="$(ip netns list|grep $PEER_NS)"
+
+	[ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
+	[ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+cfg_veth() {
+	ip netns add "${PEER_NS}"
+	ip -netns "${PEER_NS}" link set lo up
+	ip link add type veth
+	ip link set dev veth0 up
+	ip addr add dev veth0 192.168.1.2/24
+	ip addr add dev veth0 2001:db8::2/64 nodad
+
+	ip link set dev veth1 netns "${PEER_NS}"
+	ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+	ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+	ip -netns "${PEER_NS}" link set dev veth1 up
+	ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+}
+
+run_one() {
+	# use 'rx' as separator between sender args and receiver args
+	local -r all="$@"
+	local -r tx_args=${all%rx*}
+	local -r rx_args=${all#*rx}
+
+	cfg_veth
+
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+		echo "ok" || \
+		echo "failed" &
+
+	# Hack: let bg programs complete the startup
+	sleep 0.1
+	./udpgso_bench_tx ${tx_args}
+	wait $(jobs -p)
+}
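The %rx* and #*rx expansions above split the joined argument string on the literal word "rx"; for example (values mirror the "no GRO" invocation and are shown only to illustrate the split):

  all='-l 4 -4 -D 192.168.1.1 -M 10 -s 1400 rx -G -r -4 -n 10 -l 1400'
  echo "${all%rx*}"   # -> '-l 4 -4 -D 192.168.1.1 -M 10 -s 1400 '
  echo "${all#*rx}"   # -> ' -G -r -4 -n 10 -l 1400'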
+
+run_test() {
+	local -r args=$@
+
+	printf " %-40s" "$1"
+	./in_netns.sh $0 __subprocess $2 rx -G -r $3
+}
+
+run_one_nat() {
+	# use 'rx' as separator between sender args and receiver args
+	local addr1 addr2 pid family="" ipt_cmd=ip6tables
+	local -r all="$@"
+	local -r tx_args=${all%rx*}
+	local -r rx_args=${all#*rx}
+
+	if [[ ${tx_args} = *-4* ]]; then
+		ipt_cmd=iptables
+		family=-4
+		addr1=192.168.1.1
+		addr2=192.168.1.3/24
+	else
+		addr1=2001:db8::1
+		addr2="2001:db8::3/64 nodad"
+	fi
+
+	cfg_veth
+	ip -netns "${PEER_NS}" addr add dev veth1 ${addr2}
+
+	# fool the GRO engine by changing the destination address ...
+	ip netns exec "${PEER_NS}" $ipt_cmd -t nat -I PREROUTING -d ${addr1} -j DNAT --to-destination ${addr2%/*}
+
+	# ... so that GRO will match the UDP_GRO-enabled socket, but packets
+	# will land on the 'plain' one
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+	pid=$!
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+		echo "ok" || \
+		echo "failed"&
+
+	sleep 0.1
+	./udpgso_bench_tx ${tx_args}
+	kill -INT $pid
+	wait $(jobs -p)
+}
+
+run_one_2sock() {
+	# use 'rx' as separator between sender args and receiver args
+	local -r all="$@"
+	local -r tx_args=${all%rx*}
+	local -r rx_args=${all#*rx}
+
+	cfg_veth
+
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+		echo "ok" || \
+		echo "failed" &
+
+	# Hack: let bg programs complete the startup
+	sleep 0.1
+	./udpgso_bench_tx ${tx_args} -p 12345
+	sleep 0.1
+	# first UDP GSO socket should be closed at this point
+	./udpgso_bench_tx ${tx_args}
+	wait $(jobs -p)
+}
+
+run_nat_test() {
+	local -r args=$@
+
+	printf " %-40s" "$1"
+	./in_netns.sh $0 __subprocess_nat $2 rx -r $3
+}
+
+run_2sock_test() {
+	local -r args=$@
+
+	printf " %-40s" "$1"
+	./in_netns.sh $0 __subprocess_2sock $2 rx -G -r $3
+}
+
+run_all() {
+	local -r core_args="-l 4"
+	local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
+	local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+
+	echo "ipv4"
+	run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
+
+	# explicitly check that we are not receiving the UDP_SEGMENT cmsg
+	# (-S -1) when GRO does not take place
+	run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
+
+	# the GSO packets are aggregated because:
+	# * veth schedules napi after each xmit
+	# * segmentation happens in BH context, so the veth napi poll is delayed
+	#   until after the transmission of the last segment
+	run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
+	run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+	run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
+	run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
+
+	run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
+	run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+
+	echo "ipv6"
+	run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
+	run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
+	run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
+	run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
+	run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
+	run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
+
+	run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
+	run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+	echo "Missing xdp_dummy helper. Build bpf selftest first"
+	exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+	run_all
+elif [[ $1 == "__subprocess" ]]; then
+	shift
+	run_one $@
+elif [[ $1 == "__subprocess_nat" ]]; then
+	shift
+	run_one_nat $@
+elif [[ $1 == "__subprocess_2sock" ]]; then
+	shift
+	run_one_2sock $@
+fi
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
new file mode 100755
index 0000000..820bc50
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro benchmarks
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+	local -r jobs="$(jobs -p)"
+	local -r ns="$(ip netns list|grep $PEER_NS)"
+
+	[ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
+	[ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+run_one() {
+	# use 'rx' as separator between sender args and receiver args
+	local -r all="$@"
+	local -r tx_args=${all%rx*}
+	local rx_args=${all#*rx}
+
+	[[ "${tx_args}" == *"-4"* ]] && rx_args="${rx_args} -4"
+
+	ip netns add "${PEER_NS}"
+	ip -netns "${PEER_NS}" link set lo up
+	ip link add type veth
+	ip link set dev veth0 up
+	ip addr add dev veth0 192.168.1.2/24
+	ip addr add dev veth0 2001:db8::2/64 nodad
+
+	ip link set dev veth1 netns "${PEER_NS}"
+	ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+	ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+	ip -netns "${PEER_NS}" link set dev veth1 up
+
+	ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
+
+	# Hack: let bg programs complete the startup
+	sleep 0.1
+	./udpgso_bench_tx ${tx_args}
+}
+
+run_in_netns() {
+	local -r args=$@
+
+	./in_netns.sh $0 __subprocess ${args}
+}
+
+run_udp() {
+	local -r args=$@
+
+	echo "udp gso - over veth touching data"
+	run_in_netns ${args} -S 0 rx
+
+	echo "udp gso and gro - over veth touching data"
+	run_in_netns ${args} -S 0 rx -G
+}
+
+run_tcp() {
+	local -r args=$@
+
+	echo "tcp - over veth touching data"
+	run_in_netns ${args} -t rx
+}
+
+run_all() {
+	local -r core_args="-l 4"
+	local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
+	local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+
+	echo "ipv4"
+	run_tcp "${ipv4_args}"
+	run_udp "${ipv4_args}"
+
+	echo "ipv6"
+	run_tcp "${ipv6_args}"
+	run_udp "${ipv6_args}"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+	echo "Missing xdp_dummy helper. Build bpf selftest first"
+	exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+	run_all
+elif [[ $1 == "__subprocess" ]]; then
+	shift
+	run_one $@
+else
+	run_in_netns $@
+fi
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 99e537a..0f06286 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -34,7 +34,7 @@
 	run_in_netns ${args}
 
 	echo "udp gso"
-	run_in_netns ${args} -S
+	run_in_netns ${args} -S 0
 }
 
 run_tcp() {
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 727cf67..0c960f6 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -31,9 +31,21 @@
 #include <sys/wait.h>
 #include <unistd.h>
 
+#ifndef UDP_GRO
+#define UDP_GRO		104
+#endif
+
 static int  cfg_port		= 8000;
 static bool cfg_tcp;
 static bool cfg_verify;
+static bool cfg_read_all;
+static bool cfg_gro_segment;
+static int  cfg_family		= PF_INET6;
+static int  cfg_alen 		= sizeof(struct sockaddr_in6);
+static int  cfg_expected_pkt_nr;
+static int  cfg_expected_pkt_len;
+static int  cfg_expected_gso_size;
+static struct sockaddr_storage cfg_bind_addr;
 
 static bool interrupted;
 static unsigned long packets, bytes;
@@ -44,6 +56,29 @@ static void sigint_handler(int signum)
 		interrupted = true;
 }
 
+static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+{
+	struct sockaddr_in6 *addr6 = (void *) sockaddr;
+	struct sockaddr_in *addr4 = (void *) sockaddr;
+
+	switch (domain) {
+	case PF_INET:
+		addr4->sin_family = AF_INET;
+		addr4->sin_port = htons(cfg_port);
+		if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+			error(1, 0, "ipv4 parse error: %s", str_addr);
+		break;
+	case PF_INET6:
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_port = htons(cfg_port);
+		if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+			error(1, 0, "ipv6 parse error: %s", str_addr);
+		break;
+	default:
+		error(1, 0, "illegal domain");
+	}
+}
+
 static unsigned long gettimeofday_ms(void)
 {
 	struct timeval tv;
@@ -63,6 +98,8 @@ static void do_poll(int fd)
 
 	do {
 		ret = poll(&pfd, 1, 10);
+		if (interrupted)
+			break;
 		if (ret == -1)
 			error(1, errno, "poll");
 		if (ret == 0)
@@ -70,15 +107,14 @@ static void do_poll(int fd)
 		if (pfd.revents != POLLIN)
 			error(1, errno, "poll: 0x%x expected 0x%x\n",
 					pfd.revents, POLLIN);
-	} while (!ret && !interrupted);
+	} while (!ret);
 }
 
 static int do_socket(bool do_tcp)
 {
-	struct sockaddr_in6 addr = {0};
 	int fd, val;
 
-	fd = socket(PF_INET6, cfg_tcp ? SOCK_STREAM : SOCK_DGRAM, 0);
+	fd = socket(cfg_family, cfg_tcp ? SOCK_STREAM : SOCK_DGRAM, 0);
 	if (fd == -1)
 		error(1, errno, "socket");
 
@@ -89,10 +125,7 @@ static int do_socket(bool do_tcp)
 	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val)))
 		error(1, errno, "setsockopt reuseport");
 
-	addr.sin6_family =	PF_INET6;
-	addr.sin6_port =	htons(cfg_port);
-	addr.sin6_addr =	in6addr_any;
-	if (bind(fd, (void *) &addr, sizeof(addr)))
+	if (bind(fd, (void *)&cfg_bind_addr, cfg_alen))
 		error(1, errno, "bind");
 
 	if (do_tcp) {
@@ -102,6 +135,8 @@ static int do_socket(bool do_tcp)
 			error(1, errno, "listen");
 
 		do_poll(accept_fd);
+		if (interrupted)
+			exit(0);
 
 		fd = accept(accept_fd, NULL, NULL);
 		if (fd == -1)
@@ -164,51 +199,123 @@ static void do_verify_udp(const char *data, int len)
 	}
 }
 
+static int recv_msg(int fd, char *buf, int len, int *gso_size)
+{
+	char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
+	struct msghdr msg = {0};
+	struct iovec iov = {0};
+	struct cmsghdr *cmsg;
+	uint16_t *gsosizeptr;
+	int ret;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	msg.msg_control = control;
+	msg.msg_controllen = sizeof(control);
+
+	*gso_size = -1;
+	ret = recvmsg(fd, &msg, MSG_TRUNC | MSG_DONTWAIT);
+	if (ret != -1) {
+		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+			if (cmsg->cmsg_level == SOL_UDP
+			    && cmsg->cmsg_type == UDP_GRO) {
+				gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
+				*gso_size = *gsosizeptr;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
 /* Flush all outstanding datagrams. Verify first few bytes of each. */
 static void do_flush_udp(int fd)
 {
-	static char rbuf[ETH_DATA_LEN];
-	int ret, len, budget = 256;
+	static char rbuf[ETH_MAX_MTU];
+	int ret, len, gso_size, budget = 256;
 
-	len = cfg_verify ? sizeof(rbuf) : 0;
+	len = cfg_read_all ? sizeof(rbuf) : 0;
 	while (budget--) {
 		/* MSG_TRUNC will make return value full datagram length */
-		ret = recv(fd, rbuf, len, MSG_TRUNC | MSG_DONTWAIT);
+		if (!cfg_expected_gso_size)
+			ret = recv(fd, rbuf, len, MSG_TRUNC | MSG_DONTWAIT);
+		else
+			ret = recv_msg(fd, rbuf, len, &gso_size);
 		if (ret == -1 && errno == EAGAIN)
-			return;
+			break;
 		if (ret == -1)
 			error(1, errno, "recv");
-		if (len) {
+		if (cfg_expected_pkt_len && ret != cfg_expected_pkt_len)
+			error(1, 0, "recv: bad packet len, got %d,"
+			      " expected %d\n", ret, cfg_expected_pkt_len);
+		if (len && cfg_verify) {
 			if (ret == 0)
 				error(1, errno, "recv: 0 byte datagram\n");
 
 			do_verify_udp(rbuf, ret);
 		}
+		if (cfg_expected_gso_size && cfg_expected_gso_size != gso_size)
+			error(1, 0, "recv: bad gso size, got %d, expected %d "
+			      "(-1 == no gso cmsg))\n", gso_size,
+			      cfg_expected_gso_size);
 
 		packets++;
 		bytes += ret;
+		if (cfg_expected_pkt_nr && packets >= cfg_expected_pkt_nr)
+			break;
 	}
 }
 
 static void usage(const char *filepath)
 {
-	error(1, 0, "Usage: %s [-tv] [-p port]", filepath);
+	error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
 }
 
 static void parse_opts(int argc, char **argv)
 {
 	int c;
 
-	while ((c = getopt(argc, argv, "ptv")) != -1) {
+	/* bind to any by default */
+	setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
+	while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
 		switch (c) {
+		case '4':
+			cfg_family = PF_INET;
+			cfg_alen = sizeof(struct sockaddr_in);
+			setup_sockaddr(PF_INET, "0.0.0.0", &cfg_bind_addr);
+			break;
+		case 'b':
+			setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
+			break;
+		case 'G':
+			cfg_gro_segment = true;
+			break;
+		case 'l':
+			cfg_expected_pkt_len = strtoul(optarg, NULL, 0);
+			break;
+		case 'n':
+			cfg_expected_pkt_nr = strtoul(optarg, NULL, 0);
+			break;
 		case 'p':
-			cfg_port = htons(strtoul(optarg, NULL, 0));
+			cfg_port = strtoul(optarg, NULL, 0);
+			break;
+		case 'r':
+			cfg_read_all = true;
+			break;
+		case 'S':
+			cfg_expected_gso_size = strtol(optarg, NULL, 0);
 			break;
 		case 't':
 			cfg_tcp = true;
 			break;
 		case 'v':
 			cfg_verify = true;
+			cfg_read_all = true;
 			break;
 		}
 	}
@@ -223,12 +330,23 @@ static void parse_opts(int argc, char **argv)
 static void do_recv(void)
 {
 	unsigned long tnow, treport;
-	int fd;
+	int fd, loop = 0;
 
 	fd = do_socket(cfg_tcp);
 
+	if (cfg_gro_segment && !cfg_tcp) {
+		int val = 1;
+		if (setsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, sizeof(val)))
+			error(1, errno, "setsockopt UDP_GRO");
+	}
+
 	treport = gettimeofday_ms() + 1000;
 	do {
+		/* force termination after the second poll(); this copes both
+		 * with a sender slower than the receiver and with missing
+		 * packet errors
+		 */
+		if (cfg_expected_pkt_nr && loop++)
+			interrupted = true;
 		do_poll(fd);
 
 		if (cfg_tcp)
@@ -249,6 +367,10 @@ static void do_recv(void)
 
 	} while (!interrupted);
 
+	if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
+		error(1, 0, "wrong packet number! got %ld, expected %d\n",
+		      packets, cfg_expected_pkt_nr);
+
 	if (close(fd))
 		error(1, errno, "close");
 }
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index e821564..4074538 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -52,6 +52,8 @@ static bool	cfg_segment;
 static bool	cfg_sendmmsg;
 static bool	cfg_tcp;
 static bool	cfg_zerocopy;
+static int	cfg_msg_nr;
+static uint16_t	cfg_gso_size;
 
 static socklen_t cfg_alen;
 static struct sockaddr_storage cfg_dst_addr;
@@ -205,14 +207,14 @@ static void send_udp_segment_cmsg(struct cmsghdr *cm)
 
 	cm->cmsg_level = SOL_UDP;
 	cm->cmsg_type = UDP_SEGMENT;
-	cm->cmsg_len = CMSG_LEN(sizeof(cfg_mss));
+	cm->cmsg_len = CMSG_LEN(sizeof(cfg_gso_size));
 	valp = (void *)CMSG_DATA(cm);
-	*valp = cfg_mss;
+	*valp = cfg_gso_size;
 }
 
 static int send_udp_segment(int fd, char *data)
 {
-	char control[CMSG_SPACE(sizeof(cfg_mss))] = {0};
+	char control[CMSG_SPACE(sizeof(cfg_gso_size))] = {0};
 	struct msghdr msg = {0};
 	struct iovec iov = {0};
 	int ret;
@@ -241,7 +243,7 @@ static int send_udp_segment(int fd, char *data)
 
 static void usage(const char *filepath)
 {
-	error(1, 0, "Usage: %s [-46cmStuz] [-C cpu] [-D dst ip] [-l secs] [-p port] [-s sendsize]",
+	error(1, 0, "Usage: %s [-46cmtuz] [-C cpu] [-D dst ip] [-l secs] [-m messagenr] [-p port] [-s sendsize] [-S gsosize]",
 		    filepath);
 }
 
@@ -250,7 +252,7 @@ static void parse_opts(int argc, char **argv)
 	int max_len, hdrlen;
 	int c;
 
-	while ((c = getopt(argc, argv, "46cC:D:l:mp:s:Stuz")) != -1) {
+	while ((c = getopt(argc, argv, "46cC:D:l:mM:p:s:S:tuz")) != -1) {
 		switch (c) {
 		case '4':
 			if (cfg_family != PF_UNSPEC)
@@ -279,6 +281,9 @@ static void parse_opts(int argc, char **argv)
 		case 'm':
 			cfg_sendmmsg = true;
 			break;
+		case 'M':
+			cfg_msg_nr = strtoul(optarg, NULL, 10);
+			break;
 		case 'p':
 			cfg_port = strtoul(optarg, NULL, 0);
 			break;
@@ -286,6 +291,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_payload_len = strtoul(optarg, NULL, 0);
 			break;
 		case 'S':
+			cfg_gso_size = strtoul(optarg, NULL, 0);
 			cfg_segment = true;
 			break;
 		case 't':
@@ -317,6 +323,8 @@ static void parse_opts(int argc, char **argv)
 
 	cfg_mss = ETH_DATA_LEN - hdrlen;
 	max_len = ETH_MAX_MTU - hdrlen;
+	if (!cfg_gso_size)
+		cfg_gso_size = cfg_mss;
 
 	if (cfg_payload_len > max_len)
 		error(1, 0, "payload length %u exceeds max %u",
@@ -392,10 +400,12 @@ int main(int argc, char **argv)
 		else
 			num_sends += send_udp(fd, buf[i]);
 		num_msgs++;
-
 		if (cfg_zerocopy && ((num_msgs & 0xF) == 0))
 			flush_zerocopy(fd);
 
+		if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
+			break;
+
 		tnow = gettimeofday_ms();
 		if (tnow > treport) {
 			fprintf(stderr,