Enforce use of the GKI buffer queue API

Replace direct accesses to BUFFER_Q internals (count, p_first, p_last)
with the GKI queue accessors (GKI_queue_is_empty(), GKI_getfirst(),
GKI_getlast()), and initialize queues with GKI_init_q() before first use.

Also add another API, GKI_queue_length(BUFFER_Q *), for call sites that
need the number of queued buffers.
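For reference, a minimal sketch of the accessors relied on below, assuming
BUFFER_Q keeps the count/p_first/p_last fields that the replaced call sites
were reading directly; the shipped GKI implementation may differ in detail:

    #include "gki.h"    /* BUFFER_Q, UINT16, BOOLEAN (assumed codebase types) */

    /* Sketch only: BUFFER_Q layout inferred from the call sites in this patch. */
    UINT16 GKI_queue_length (BUFFER_Q *p_q)
    {
        return (p_q->count);
    }

    BOOLEAN GKI_queue_is_empty (BUFFER_Q *p_q)
    {
        return ((BOOLEAN) (p_q->count == 0));
    }

    void *GKI_getfirst (BUFFER_Q *p_q)
    {
        return (p_q->p_first);
    }

    void *GKI_getlast (BUFFER_Q *p_q)
    {
        return (p_q->p_last);
    }

Call sites then test emptiness with GKI_queue_is_empty() or GKI_getfirst()
instead of peeking at the queue fields, as in the hunks below.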
diff --git a/stack/avct/avct_lcb_act.c b/stack/avct/avct_lcb_act.c
index 17d5d3d..4ca1745 100644
--- a/stack/avct/avct_lcb_act.c
+++ b/stack/avct/avct_lcb_act.c
@@ -618,7 +618,7 @@
             pkt_type = AVCT_PKT_TYPE_END;
         }
     }
-    AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", p_lcb->tx_q.count);
+    AVCT_TRACE_DEBUG ("avct_lcb_send_msg tx_q_count:%d", GKI_queue_length(&p_lcb->tx_q));
     return;
 }
 
diff --git a/stack/avdt/avdt_scb_act.c b/stack/avdt/avdt_scb_act.c
index 72dd3cd..8c46f7b 100644
--- a/stack/avdt/avdt_scb_act.c
+++ b/stack/avdt/avdt_scb_act.c
@@ -1421,7 +1421,7 @@
     BT_HDR          *p_frag;
 
     AVDT_TRACE_WARNING("avdt_scb_snd_stream_close c:%d, off:%d",
-        p_scb->frag_q.count, p_scb->frag_off);
+        GKI_queue_length(&p_scb->frag_q), p_scb->frag_off);
     /* clean fragments queue */
     while((p_frag = (BT_HDR*)GKI_dequeue (&p_scb->frag_q)) != NULL)
          GKI_freebuf(p_frag);
diff --git a/stack/bnep/bnep_api.c b/stack/bnep/bnep_api.c
index b1d8c7c..87b2cb5 100644
--- a/stack/bnep/bnep_api.c
+++ b/stack/bnep/bnep_api.c
@@ -432,7 +432,7 @@
     }
 
     /* Check transmit queue */
-    if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+    if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
     {
         GKI_freebuf (p_buf);
         return (BNEP_Q_SIZE_EXCEEDED);
@@ -538,7 +538,7 @@
     }
 
     /* Check transmit queue */
-    if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+    if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
         return (BNEP_Q_SIZE_EXCEEDED);
 
     /* Get a buffer to copy teh data into */
@@ -762,7 +762,7 @@
     p_status->con_status            = BNEP_STATUS_CONNECTED;
     p_status->l2cap_cid             = p_bcb->l2cap_cid;
     p_status->rem_mtu_size          = p_bcb->rem_mtu_size;
-    p_status->xmit_q_depth          = p_bcb->xmit_q.count;
+    p_status->xmit_q_depth          = GKI_queue_length(&p_bcb->xmit_q);
     p_status->sent_num_filters      = p_bcb->sent_num_filters;
     p_status->sent_mcast_filters    = p_bcb->sent_mcast_filters;
     p_status->rcvd_num_filters      = p_bcb->rcvd_num_filters;
diff --git a/stack/bnep/bnep_utils.c b/stack/bnep/bnep_utils.c
index 92061d0..89c471c 100644
--- a/stack/bnep/bnep_utils.c
+++ b/stack/bnep/bnep_utils.c
@@ -152,7 +152,7 @@
     p_bcb->p_pending_data   = NULL;
 
     /* Free transmit queue */
-    while (p_bcb->xmit_q.count)
+    while (!GKI_queue_is_empty(&p_bcb->xmit_q))
     {
         GKI_freebuf (GKI_dequeue (&p_bcb->xmit_q));
     }
@@ -455,7 +455,7 @@
     BNEP_TRACE_EVENT ("BNEP - bnepu_check_send_packet for CID: 0x%x", p_bcb->l2cap_cid);
     if (p_bcb->con_flags & BNEP_FLAGS_L2CAP_CONGESTED)
     {
-        if (p_bcb->xmit_q.count >= BNEP_MAX_XMITQ_DEPTH)
+        if (GKI_queue_length(&p_bcb->xmit_q) >= BNEP_MAX_XMITQ_DEPTH)
         {
             BNEP_TRACE_EVENT ("BNEP - congested, dropping buf, CID: 0x%x", p_bcb->l2cap_cid);
 
diff --git a/stack/btm/btm_ble_bgconn.c b/stack/btm/btm_ble_bgconn.c
index b33aa2e..2fabe23 100644
--- a/stack/btm/btm_ble_bgconn.c
+++ b/stack/btm/btm_ble_bgconn.c
@@ -705,7 +705,7 @@
     tBTM_BLE_CONN_REQ *p_req;
     BOOLEAN     rt = FALSE;
 
-    if ( btm_cb.ble_ctr_cb.conn_pending_q.count )
+    if (!GKI_queue_is_empty(&btm_cb.ble_ctr_cb.conn_pending_q))
     {
         p_req = (tBTM_BLE_CONN_REQ*)GKI_dequeue (&btm_cb.ble_ctr_cb.conn_pending_q);
 
diff --git a/stack/btm/btm_pm.c b/stack/btm/btm_pm.c
index 76bfc04..446c88d 100644
--- a/stack/btm/btm_pm.c
+++ b/stack/btm/btm_pm.c
@@ -1053,7 +1053,7 @@
     /* Scan state-paging, inquiry, and trying to connect */
 
     /* Check for paging */
-    if (btm_cb.is_paging || btm_cb.page_queue.count > 0 ||
+    if (btm_cb.is_paging || GKI_queue_length(&btm_cb.page_queue) > 0 ||
        BTM_BL_PAGING_STARTED == btm_cb.busy_level)
     {
        BTM_TRACE_DEBUG("btm_pm_device_in_scan_state- paging");
diff --git a/stack/btu/btu_hcif.c b/stack/btu/btu_hcif.c
index 9ec8dd3..aea9800 100644
--- a/stack/btu/btu_hcif.c
+++ b/stack/btu/btu_hcif.c
@@ -460,7 +460,7 @@
 #endif
 
     /* If there are already commands in the queue, then enqueue this command */
-    if ((p_buf) && (p_hci_cmd_cb->cmd_xmit_q.count))
+    if ((p_buf) && (!GKI_queue_is_empty(&p_hci_cmd_cb->cmd_xmit_q)))
     {
         GKI_enqueue (&(p_hci_cmd_cb->cmd_xmit_q), p_buf);
         p_buf = NULL;
@@ -471,7 +471,7 @@
          && (p_hci_cmd_cb->cmd_window == 0)
          && (btm_cb.devcb.state == BTM_DEV_STATE_WAIT_RESET_CMPLT)) )
     {
-        p_hci_cmd_cb->cmd_window = p_hci_cmd_cb->cmd_xmit_q.count + 1;
+        p_hci_cmd_cb->cmd_window = GKI_queue_length(&p_hci_cmd_cb->cmd_xmit_q) + 1;
     }
 
     /* See if we can send anything */
diff --git a/stack/btu/btu_task.c b/stack/btu/btu_task.c
index 8d1fd04..9045dac 100644
--- a/stack/btu/btu_task.c
+++ b/stack/btu/btu_task.c
@@ -921,8 +921,8 @@
 *******************************************************************************/
 void btu_check_bt_sleep (void)
 {
-    if ((btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q.count == 0)
-        &&(btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q.count == 0))
+    if ((GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_cmpl_q)
+        && GKI_queue_is_empty(&btu_cb.hci_cmd_cb[LOCAL_BR_EDR_CONTROLLER_ID].cmd_xmit_q)))
     {
         if (l2cb.controller_xmit_window == l2cb.num_lm_acl_bufs)
         {
diff --git a/stack/gap/gap_conn.c b/stack/gap/gap_conn.c
index d1e96d8..61f8892 100644
--- a/stack/gap/gap_conn.c
+++ b/stack/gap/gap_conn.c
@@ -1173,10 +1173,10 @@
     /* Drop any buffers we may be holding */
     p_ccb->rx_queue_size = 0;
 
-    while (p_ccb->rx_queue.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->rx_queue))
         GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue));
 
-    while (p_ccb->tx_queue.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->tx_queue))
         GKI_freebuf (GKI_dequeue (&p_ccb->tx_queue));
 
     p_ccb->con_state = GAP_CCB_STATE_IDLE;
diff --git a/stack/gatt/gatt_auth.c b/stack/gatt/gatt_auth.c
index 10cf76e..ae52145 100644
--- a/stack/gatt/gatt_auth.c
+++ b/stack/gatt/gatt_auth.c
@@ -192,7 +192,7 @@
             gatt_sec_check_complete(status , p_buf->p_clcb, p_tcb->sec_act);
             GKI_freebuf(p_buf);
             /* start all other pending operation in queue */
-            count = p_tcb->pending_enc_clcb.count;
+            count = GKI_queue_length(&p_tcb->pending_enc_clcb);
             for (; count > 0; count --)
             {
                 if ((p_buf = (tGATT_PENDING_ENC_CLCB *)GKI_dequeue (&p_tcb->pending_enc_clcb)) != NULL)
@@ -246,7 +246,7 @@
         {
             gatt_set_sec_act(p_tcb, GATT_SEC_NONE);
 
-            count = p_tcb->pending_enc_clcb.count;
+            count = GKI_queue_length(&p_tcb->pending_enc_clcb);
 
             for (; count > 0; count --)
             {
diff --git a/stack/gatt/gatt_db.c b/stack/gatt/gatt_db.c
index b7887c9..b12ff52 100644
--- a/stack/gatt/gatt_db.c
+++ b/stack/gatt/gatt_db.c
@@ -62,6 +62,8 @@
 BOOLEAN gatts_init_service_db (tGATT_SVC_DB *p_db, tBT_UUID *p_service,  BOOLEAN is_pri,
                                UINT16 s_hdl, UINT16 num_handle)
 {
+    GKI_init_q(&p_db->svc_buffer);
+
     if (!allocate_svc_db_buf(p_db))
     {
         GATT_TRACE_ERROR("gatts_init_service_db failed, no resources");
diff --git a/stack/gatt/gatt_main.c b/stack/gatt/gatt_main.c
index 3d96035..f0a0229 100644
--- a/stack/gatt/gatt_main.c
+++ b/stack/gatt/gatt_main.c
@@ -102,6 +102,8 @@
 #endif
     gatt_cb.def_mtu_size = GATT_DEF_BLE_MTU_SIZE;
     GKI_init_q (&gatt_cb.sign_op_queue);
+    GKI_init_q (&gatt_cb.srv_chg_clt_q);
+    GKI_init_q (&gatt_cb.pending_new_srv_start_q);
     /* First, register fixed L2CAP channel for ATT over BLE */
     fixed_reg.fixed_chnl_opts.mode         = L2CAP_FCR_BASIC_MODE;
     fixed_reg.fixed_chnl_opts.max_transmit = 0xFF;
diff --git a/stack/gatt/gatt_sr.c b/stack/gatt/gatt_sr.c
index 2199794..5f9ddf0 100755
--- a/stack/gatt/gatt_sr.c
+++ b/stack/gatt/gatt_sr.c
@@ -106,7 +106,7 @@
         GKI_freebuf (p_tcb->sr_cmd.p_rsp_msg);
     }
 
-    while (p_tcb->sr_cmd.multi_rsp_q.p_first)
+    while (GKI_getfirst(&p_tcb->sr_cmd.multi_rsp_q))
         GKI_freebuf (GKI_dequeue (&p_tcb->sr_cmd.multi_rsp_q));
     memset( &p_tcb->sr_cmd, 0, sizeof(tGATT_SR_CMD));
 }
@@ -145,9 +145,9 @@
     if (status == GATT_SUCCESS)
     {
         GATT_TRACE_DEBUG ("Multi read count=%d num_hdls=%d",
-                           p_cmd->multi_rsp_q.count, p_cmd->multi_req.num_handles);
+                           GKI_queue_length(&p_cmd->multi_rsp_q), p_cmd->multi_req.num_handles);
         /* Wait till we get all the responses */
-        if (p_cmd->multi_rsp_q.count == p_cmd->multi_req.num_handles)
+        if (GKI_queue_length(&p_cmd->multi_rsp_q) == p_cmd->multi_req.num_handles)
         {
             len = sizeof(BT_HDR) + L2CAP_MIN_OFFSET + mtu;
             if ((p_buf = (BT_HDR *)GKI_getbuf(len)) == NULL)
diff --git a/stack/gatt/gatt_utils.c b/stack/gatt/gatt_utils.c
index 0e841a9..f0658ea 100644
--- a/stack/gatt/gatt_utils.c
+++ b/stack/gatt/gatt_utils.c
@@ -93,7 +93,7 @@
 {
     GATT_TRACE_DEBUG("gatt_free_pending_ind");
     /* release all queued indications */
-    while (p_tcb->pending_ind_q.p_first)
+    while (!GKI_queue_is_empty(&p_tcb->pending_ind_q))
         GKI_freebuf (GKI_dequeue (&p_tcb->pending_ind_q));
 }
 
@@ -110,7 +110,7 @@
 {
     GATT_TRACE_DEBUG("gatt_free_pending_enc_queue");
     /* release all queued indications */
-    while (p_tcb->pending_enc_clcb.p_first)
+    while (!GKI_queue_is_empty(&p_tcb->pending_enc_clcb))
         GKI_freebuf (GKI_dequeue (&p_tcb->pending_enc_clcb));
 }
 
@@ -373,7 +373,7 @@
 
     if (p)
     {
-        while (p->svc_db.svc_buffer.p_first)
+        while (!GKI_queue_is_empty(&p->svc_db.svc_buffer))
             GKI_freebuf (GKI_dequeue (&p->svc_db.svc_buffer));
         memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM));
     }
@@ -397,7 +397,7 @@
     {
         if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0)
         {
-            while (p_elem->svc_db.svc_buffer.p_first)
+            while (!GKI_queue_is_empty(&p_elem->svc_db.svc_buffer))
                 GKI_freebuf (GKI_dequeue (&p_elem->svc_db.svc_buffer));
 
             p_elem->svc_db.mem_free = 0;
@@ -1363,7 +1363,7 @@
             p_sreg->e_hdl               = p_list->asgn_range.e_handle;
             p_sreg->p_db                = &p_list->svc_db;
 
-            GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",p_sreg->p_db->svc_buffer.count);
+            GATT_TRACE_DEBUG ("total GKI buffer in db [%d]",GKI_queue_length(&p_sreg->p_db->svc_buffer));
             break;
         }
     }
diff --git a/stack/l2cap/l2c_api.c b/stack/l2cap/l2c_api.c
index 3d228d8..be7829c 100644
--- a/stack/l2cap/l2c_api.c
+++ b/stack/l2cap/l2c_api.c
@@ -1484,7 +1484,7 @@
     {
         L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested \
             xmit_hold_q.count: %u buff_quota: %u", fixed_cid,
-            p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q.count,
+            GKI_queue_length(&p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->xmit_hold_q),
             p_lcb->p_fixed_ccbs[fixed_cid - L2CAP_FIRST_FIXED_CHNL]->buff_quota);
         GKI_freebuf (p_buf);
         return (L2CAP_DW_FAILED);
@@ -1810,7 +1810,7 @@
     if (num_to_flush != L2CAP_FLUSH_CHANS_GET)
     {
         L2CAP_TRACE_API ("L2CA_FlushChannel (FLUSH)  CID: 0x%04x  NumToFlush: %d  QC: %u  pFirst: 0x%08x",
-                           lcid, num_to_flush, p_ccb->xmit_hold_q.count, p_ccb->xmit_hold_q.p_first);
+                           lcid, num_to_flush, GKI_queue_length(&p_ccb->xmit_hold_q), GKI_getfirst(&p_ccb->xmit_hold_q));
     }
     else
     {
@@ -1838,7 +1838,7 @@
         }
 #endif
 
-        p_buf = (BT_HDR *)p_lcb->link_xmit_data_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q);
 
         /* First flush the number we are asked to flush */
         while ((p_buf != NULL) && (num_to_flush != 0))
@@ -1860,7 +1860,7 @@
     }
 
     /* If needed, flush buffers in the CCB xmit hold queue */
-    while ( (num_to_flush != 0) && (p_ccb->xmit_hold_q.count != 0) )
+    while ( (num_to_flush != 0) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
     {
         p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
         if (p_buf)
@@ -1874,7 +1874,7 @@
         (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, num_flushed2);
 
     /* Now count how many are left */
-    p_buf = (BT_HDR *)p_lcb->link_xmit_data_q.p_first;
+    p_buf = (BT_HDR *)GKI_getfirst(&p_lcb->link_xmit_data_q);
 
     while (p_buf != NULL)
     {
@@ -1885,7 +1885,7 @@
     }
 
     /* Add in the number in the CCB xmit queue */
-    num_left += p_ccb->xmit_hold_q.count;
+    num_left += GKI_queue_length(&p_ccb->xmit_hold_q);
 
     /* Return the local number of buffers left for the CID */
     L2CAP_TRACE_DEBUG ("L2CA_FlushChannel()  flushed: %u + %u,  num_left: %u", num_flushed1, num_flushed2, num_left);
diff --git a/stack/l2cap/l2c_ble.c b/stack/l2cap/l2c_ble.c
index d3ba6e2..f9c75e2 100644
--- a/stack/l2cap/l2c_ble.c
+++ b/stack/l2cap/l2c_ble.c
@@ -915,7 +915,7 @@
             /* this link may have sent anything but some other link sent packets so  */
             /* so we may need a timer to kick off this link's transmissions.         */
             if ( (p_lcb->link_state == LST_CONNECTED)
-              && (p_lcb->link_xmit_data_q.count)
+              && (GKI_queue_length(&p_lcb->link_xmit_data_q))
               && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
                 btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
         }
diff --git a/stack/l2cap/l2c_csm.c b/stack/l2cap/l2c_csm.c
index fa261b2..5bf268f 100644
--- a/stack/l2cap/l2c_csm.c
+++ b/stack/l2cap/l2c_csm.c
@@ -789,7 +789,7 @@
                 p_ccb->fcrb.connect_tick_count = GKI_get_os_tick_count();
 #endif
                 /* See if we can forward anything on the hold queue */
-                if (p_ccb->xmit_hold_q.count)
+                if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                 {
                     l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
                 }
@@ -872,7 +872,7 @@
 #endif
 
         /* See if we can forward anything on the hold queue */
-        if ( (p_ccb->chnl_state == CST_OPEN) && (p_ccb->xmit_hold_q.count) )
+        if ( (p_ccb->chnl_state == CST_OPEN) && (!GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
         {
             l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
         }
diff --git a/stack/l2cap/l2c_fcr.c b/stack/l2cap/l2c_fcr.c
index 6e12607..334730d 100644
--- a/stack/l2cap/l2c_fcr.c
+++ b/stack/l2cap/l2c_fcr.c
@@ -233,13 +233,13 @@
     if (p_fcrb->p_rx_sdu)
         GKI_freebuf (p_fcrb->p_rx_sdu);
 
-    while (p_fcrb->waiting_for_ack_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->waiting_for_ack_q));
 
-    while (p_fcrb->srej_rcv_hold_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->srej_rcv_hold_q));
 
-    while (p_fcrb->retrans_q.p_first)
+    while (!GKI_queue_is_empty(&p_fcrb->retrans_q))
         GKI_freebuf (GKI_dequeue (&p_fcrb->retrans_q));
 
     btu_stop_quick_timer (&p_fcrb->ack_timer);
@@ -390,10 +390,10 @@
     {
         /* Check if remote side flowed us off or the transmit window is full */
         if ( (p_ccb->fcrb.remote_busy == TRUE)
-         ||  (p_ccb->fcrb.waiting_for_ack_q.count >= p_ccb->peer_cfg.fcr.tx_win_sz) )
+         ||  (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q) >= p_ccb->peer_cfg.fcr.tx_win_sz) )
         {
 #if (L2CAP_ERTM_STATS == TRUE)
-            if (p_ccb->xmit_hold_q.count != 0)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 p_ccb->fcrb.xmit_window_closed++;
 
@@ -699,7 +699,7 @@
 
     L2CAP_TRACE_EVENT ("      eRTM Rx Nxt_tx_seq %u, Lst_rx_ack %u, Nxt_seq_exp %u, Lst_ack_snt %u, wt_q.cnt %u, tries %u",
                         p_ccb->fcrb.next_tx_seq, p_ccb->fcrb.last_rx_ack, p_ccb->fcrb.next_seq_expected,
-                        p_ccb->fcrb.last_ack_sent, p_ccb->fcrb.waiting_for_ack_q.count, p_ccb->fcrb.num_tries);
+                        p_ccb->fcrb.last_ack_sent, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q), p_ccb->fcrb.num_tries);
 
 #endif /* BT_TRACE_VERBOSE */
 
@@ -768,7 +768,7 @@
         if (ctrl_word & L2CAP_FCR_S_FRAME_BIT)
             ctrl_word &= ~L2CAP_FCR_P_BIT;
 
-        if (p_ccb->fcrb.waiting_for_ack_q.count == 0)
+        if (GKI_queue_is_empty(&p_ccb->fcrb.waiting_for_ack_q))
             p_ccb->fcrb.num_tries = 0;
 
         l2c_fcr_stop_timer (p_ccb);
@@ -797,7 +797,7 @@
         return;
 
     /* If we have some buffers held while doing SREJ, and SREJ has cleared, process them now */
-    if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (p_ccb->fcrb.srej_rcv_hold_q.count > 0) )
+    if ( (!p_ccb->fcrb.local_busy) && (!p_ccb->fcrb.srej_sent) && (!GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q)))
     {
         BUFFER_Q temp_q = p_ccb->fcrb.srej_rcv_hold_q;
 
@@ -845,7 +845,7 @@
     }
 
     /* If a window has opened, check if we can send any more packets */
-    if ( (p_ccb->fcrb.retrans_q.count || p_ccb->xmit_hold_q.count)
+    if ( (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q) || !GKI_queue_is_empty(&p_ccb->xmit_hold_q))
       && (p_ccb->fcrb.wait_ack == FALSE)
       && (l2c_fcr_is_flow_controlled (p_ccb) == FALSE) )
     {
@@ -866,7 +866,7 @@
 {
     L2CAP_TRACE_DEBUG ("l2c_fcr_proc_tout:  CID: 0x%04x  num_tries: %u (max: %u)  wait_ack: %u  ack_q_count: %u",
                         p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
-                        p_ccb->fcrb.wait_ack, p_ccb->fcrb.waiting_for_ack_q.count);
+                        p_ccb->fcrb.wait_ack, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
 
 #if (L2CAP_ERTM_STATS == TRUE)
     p_ccb->fcrb.retrans_touts++;
@@ -939,7 +939,7 @@
      &&  ((ctrl_word & L2CAP_FCR_P_BIT) == 0) )
     {
         /* If anything still waiting for ack, restart the timer if it was stopped */
-        if (p_fcrb->waiting_for_ack_q.count)
+        if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
             l2c_fcr_start_timer (p_ccb);
 
         return (TRUE);
@@ -951,11 +951,11 @@
     num_bufs_acked = (req_seq - p_fcrb->last_rx_ack) & L2CAP_FCR_SEQ_MODULO;
 
     /* Verify the request sequence is in range before proceeding */
-    if (num_bufs_acked > p_fcrb->waiting_for_ack_q.count)
+    if (num_bufs_acked > GKI_queue_length(&p_fcrb->waiting_for_ack_q))
     {
         /* The channel is closed if ReqSeq is not in range */
         L2CAP_TRACE_WARNING ("L2CAP eRTM Frame BAD Req_Seq - ctrl_word: 0x%04x  req_seq 0x%02x  last_rx_ack: 0x%02x  QCount: %u",
-                               ctrl_word, req_seq, p_fcrb->last_rx_ack, p_fcrb->waiting_for_ack_q.count);
+                               ctrl_word, req_seq, p_fcrb->last_rx_ack, GKI_queue_length(&p_fcrb->waiting_for_ack_q));
 
         l2cu_disconnect_chnl (p_ccb);
         return (FALSE);
@@ -979,7 +979,7 @@
 
         for (xx = 0; xx < num_bufs_acked; xx++)
         {
-            ls = ((BT_HDR *)(p_fcrb->waiting_for_ack_q.p_first))->layer_specific & L2CAP_FCR_SAR_BITS;
+            ls = ((BT_HDR *)(GKI_getfirst(&p_fcrb->waiting_for_ack_q)))->layer_specific & L2CAP_FCR_SAR_BITS;
 
             if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) )
                 full_sdus_xmitted++;
@@ -995,7 +995,7 @@
         if ( (p_ccb->p_rcb) && (p_ccb->p_rcb->api.pL2CA_TxComplete_Cb) && (full_sdus_xmitted) )
         {
             /* Special case for eRTM, if all packets sent, send 0xFFFF */
-            if ( (p_fcrb->waiting_for_ack_q.count == 0) && (p_ccb->xmit_hold_q.count == 0) )
+            if (GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q) && (GKI_queue_is_empty(&p_ccb->xmit_hold_q)))
                 full_sdus_xmitted = 0xFFFF;
 
             (*p_ccb->p_rcb->api.pL2CA_TxComplete_Cb)(p_ccb->local_cid, full_sdus_xmitted);
@@ -1003,7 +1003,7 @@
     }
 
     /* If anything still waiting for ack, restart the timer if it was stopped */
-    if (p_fcrb->waiting_for_ack_q.count)
+    if (!GKI_queue_is_empty(&p_fcrb->waiting_for_ack_q))
         l2c_fcr_start_timer (p_ccb);
 
     return (TRUE);
@@ -1165,9 +1165,9 @@
             if (p_fcrb->srej_sent)
             {
                 /* If SREJ sent, save the frame for later processing as long as it is in sequence */
-                next_srej = (((BT_HDR *)p_fcrb->srej_rcv_hold_q.p_last)->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
+                next_srej = (((BT_HDR *)GKI_getlast(&p_fcrb->srej_rcv_hold_q))->layer_specific + 1) & L2CAP_FCR_SEQ_MODULO;
 
-                if ( (tx_seq == next_srej) && (p_fcrb->srej_rcv_hold_q.count < p_ccb->our_cfg.fcr.tx_win_sz) )
+                if ( (tx_seq == next_srej) && (GKI_queue_length(&p_fcrb->srej_rcv_hold_q) < p_ccb->our_cfg.fcr.tx_win_sz) )
                 {
                     /* If user gave us a pool for held rx buffers, use that */
                     if (p_ccb->ertm_info.fcr_rx_pool_id != HCI_ACL_POOL_ID)
@@ -1197,7 +1197,7 @@
                 else
                 {
                     L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x  frame dropped in Srej Sent next_srej:%u  hold_q.count:%u  win_sz:%u",
-                                         p_ccb->local_cid, next_srej, p_fcrb->srej_rcv_hold_q.count, p_ccb->our_cfg.fcr.tx_win_sz);
+                                         p_ccb->local_cid, next_srej, GKI_queue_length(&p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
 
                     p_fcrb->rej_after_srej = TRUE;
                     GKI_freebuf (p_buf);
@@ -1225,10 +1225,10 @@
                 }
                 else
                 {
-                    if (p_fcrb->srej_rcv_hold_q.count != 0)
+                    if (!GKI_queue_is_empty(&p_fcrb->srej_rcv_hold_q))
                     {
                         L2CAP_TRACE_ERROR ("process_i_frame() CID: 0x%04x  sending SREJ tx_seq:%d hold_q.count:%u",
-                                             p_ccb->local_cid, tx_seq, p_fcrb->srej_rcv_hold_q.count);
+                                             p_ccb->local_cid, tx_seq, GKI_queue_length(&p_fcrb->srej_rcv_hold_q));
                     }
                     p_buf->layer_specific = tx_seq;
                     GKI_enqueue (&p_fcrb->srej_rcv_hold_q, p_buf);
@@ -1275,8 +1275,8 @@
                                         (L2CAP_FCR_ACK_TOUT*QUICK_TIMER_TICKS_PER_SEC)/1000);
             }
         }
-        else if ( ((p_ccb->xmit_hold_q.count == 0) || (l2c_fcr_is_flow_controlled (p_ccb)))
-               &&  (p_ccb->fcrb.srej_rcv_hold_q.count == 0) )
+        else if ( ((GKI_queue_is_empty(&p_ccb->xmit_hold_q)) || (l2c_fcr_is_flow_controlled (p_ccb)))
+               &&  (GKI_queue_is_empty(&p_ccb->fcrb.srej_rcv_hold_q)))
         {
             if (p_fcrb->local_busy)
                 l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_RNR, 0);
@@ -1517,13 +1517,13 @@
     UINT8       buf_seq;
     UINT16      ctrl_word;
 
-    if ( (p_ccb->fcrb.waiting_for_ack_q.p_first)
+    if ( (GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q))
      &&  (p_ccb->peer_cfg.fcr.max_transmit != 0)
      &&  (p_ccb->fcrb.num_tries >= p_ccb->peer_cfg.fcr.max_transmit) )
     {
         L2CAP_TRACE_EVENT ("Max Tries Exceeded:  (last_acq: %d  CID: 0x%04x  num_tries: %u (max: %u) ack_q_count: %u",
                 p_ccb->fcrb.last_rx_ack, p_ccb->local_cid, p_ccb->fcrb.num_tries, p_ccb->peer_cfg.fcr.max_transmit,
-                p_ccb->fcrb.waiting_for_ack_q.count);
+                GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
 
         l2cu_disconnect_chnl (p_ccb);
         return (FALSE);
@@ -1534,7 +1534,7 @@
     {
         /* If sending only one, the sequence number tells us which one. Look for it.
         */
-        for (p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first; p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf))
+        for (p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q); p_buf; p_buf = (BT_HDR *)GKI_getnext (p_buf))
         {
             /* Get the old control word */
             p = ((UINT8 *) (p_buf+1)) + p_buf->offset + L2CAP_PKT_OVERHEAD;
@@ -1551,7 +1551,7 @@
 
         if (!p_buf)
         {
-            L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u  q_count: %u", tx_seq, p_ccb->fcrb.waiting_for_ack_q.count);
+            L2CAP_TRACE_ERROR ("retransmit_i_frames() UNKNOWN seq: %u  q_count: %u", tx_seq, GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q));
             return (TRUE);
         }
     }
@@ -1559,7 +1559,7 @@
     {
         /* Retransmitting everything. Flush buffers we already put in the link xmit queue.
         */
-        p_buf = (BT_HDR *)p_ccb->p_lcb->link_xmit_data_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->p_lcb->link_xmit_data_q);
 
         while (p_buf != NULL)
         {
@@ -1577,10 +1577,10 @@
         }
 
         /* Also flush our retransmission queue */
-        while (p_ccb->fcrb.retrans_q.p_first)
+        while (!GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
             GKI_freebuf (GKI_dequeue (&p_ccb->fcrb.retrans_q));
 
-        p_buf = (BT_HDR *)p_ccb->fcrb.waiting_for_ack_q.p_first;
+        p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->fcrb.waiting_for_ack_q);
     }
 
     while (p_buf != NULL)
@@ -1602,7 +1602,7 @@
 
     l2c_link_check_send_pkts (p_ccb->p_lcb, NULL, NULL);
 
-    if (p_ccb->fcrb.waiting_for_ack_q.count)
+    if (GKI_queue_length(&p_ccb->fcrb.waiting_for_ack_q))
     {
         p_ccb->fcrb.num_tries++;
         l2c_fcr_start_timer (p_ccb);
@@ -1633,7 +1633,7 @@
 
     /* If there is anything in the retransmit queue, that goes first
     */
-    if (p_ccb->fcrb.retrans_q.p_first)
+    if (GKI_getfirst(&p_ccb->fcrb.retrans_q))
     {
         p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->fcrb.retrans_q);
 
@@ -1668,7 +1668,7 @@
         max_pdu = max_packet_length - L2CAP_MAX_HEADER_FCS;
     }
 
-    p_buf = (BT_HDR *)p_ccb->xmit_hold_q.p_first;
+    p_buf = (BT_HDR *)GKI_getfirst(&p_ccb->xmit_hold_q);
 
     /* If there is more data than the MPS, it requires segmentation */
     if (p_buf->len > max_pdu)
diff --git a/stack/l2cap/l2c_link.c b/stack/l2cap/l2c_link.c
index 240ead3..55978a2 100644
--- a/stack/l2cap/l2c_link.c
+++ b/stack/l2cap/l2c_link.c
@@ -803,7 +803,7 @@
             /* this link may have sent anything but some other link sent packets so  */
             /* so we may need a timer to kick off this link's transmissions.         */
             if ( (p_lcb->link_state == LST_CONNECTED)
-              && (p_lcb->link_xmit_data_q.count)
+              && (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
               && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
                 btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
         }
@@ -1066,11 +1066,11 @@
     /*
      * We only switch park to active only if we have unsent packets
      */
-    if ( p_lcb->link_xmit_data_q.count == 0 )
+    if ( GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
     {
         for (p_ccb = p_lcb->ccb_queue.p_first_ccb; p_ccb; p_ccb = p_ccb->p_next_ccb)
         {
-            if (p_ccb->xmit_hold_q.count != 0)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 need_to_active = TRUE;
                 break;
@@ -1261,7 +1261,7 @@
         /* There is a special case where we have readjusted the link quotas and  */
         /* this link may have sent anything but some other link sent packets so  */
         /* so we may need a timer to kick off this link's transmissions.         */
-        if ( (p_lcb->link_xmit_data_q.count) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
+        if ( (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q)) && (p_lcb->sent_not_acked < p_lcb->link_xmit_quota) )
             btu_start_timer (&p_lcb->timer_entry, BTU_TTYPE_L2CAP_LINK, L2CAP_LINK_FLOW_CONTROL_TOUT);
     }
 
diff --git a/stack/l2cap/l2c_main.c b/stack/l2cap/l2c_main.c
index 325f7af..1b45713 100755
--- a/stack/l2cap/l2c_main.c
+++ b/stack/l2cap/l2c_main.c
@@ -152,11 +152,11 @@
             {
                 L2CAP_TRACE_WARNING ("L2CAP - holding ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d",
                                     handle, p_msg->layer_specific, rcv_cid, cmd_code,
-                                    l2cb.rcv_hold_q.count);
+                                    GKI_queue_length(&l2cb.rcv_hold_q));
                 p_msg->layer_specific = 2;
                 GKI_enqueue (&l2cb.rcv_hold_q, p_msg);
 
-                if (l2cb.rcv_hold_q.count == 1)
+                if (GKI_queue_length(&l2cb.rcv_hold_q) == 1)
                     btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT);
 
                 return;
@@ -164,7 +164,7 @@
             else
             {
                 L2CAP_TRACE_ERROR ("L2CAP - rcvd ACL for unknown handle:%d ls:%d cid:%d opcode:%d cur count:%d",
-                                    handle, p_msg->layer_specific, rcv_cid, cmd_code, l2cb.rcv_hold_q.count);
+                                    handle, p_msg->layer_specific, rcv_cid, cmd_code, GKI_queue_length(&l2cb.rcv_hold_q));
             }
             GKI_freebuf (p_msg);
             return;
@@ -816,7 +816,7 @@
     BT_HDR      *p_buf, *p_buf1;
     BUFFER_Q    *p_rcv_hold_q = &l2cb.rcv_hold_q;
 
-    if (!p_rcv_hold_q->count)
+    if (GKI_queue_is_empty(p_rcv_hold_q))
         return;
 
     if (!timed_out)
@@ -842,7 +842,7 @@
     }
 
     /* If anyone still in the queue, restart the timeout */
-    if (p_rcv_hold_q->count)
+    if (!GKI_queue_is_empty(p_rcv_hold_q))
         btu_start_timer (&l2cb.rcv_hold_tle, BTU_TTYPE_L2CAP_HOLD, BT_1SEC_TIMEOUT);
 }
 
@@ -984,7 +984,7 @@
     if (p_ccb->cong_sent)
     {
         L2CAP_TRACE_ERROR ("L2CAP - CID: 0x%04x cannot send, already congested  xmit_hold_q.count: %u  buff_quota: %u",
-                            p_ccb->local_cid, p_ccb->xmit_hold_q.count, p_ccb->buff_quota);
+                            p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q), p_ccb->buff_quota);
 
         GKI_freebuf (p_data);
         return (L2CAP_DW_FAILED);
diff --git a/stack/l2cap/l2c_utils.c b/stack/l2cap/l2c_utils.c
index 77849ac..2d54ca5 100644
--- a/stack/l2cap/l2c_utils.c
+++ b/stack/l2cap/l2c_utils.c
@@ -210,7 +210,7 @@
         btm_acl_removed (p_lcb->remote_bd_addr, BT_TRANSPORT_BR_EDR);
 #endif
     /* Release any held buffers */
-    while (p_lcb->link_xmit_data_q.p_first)
+    while (!GKI_queue_is_empty(&p_lcb->link_xmit_data_q))
         GKI_freebuf (GKI_dequeue (&p_lcb->link_xmit_data_q));
 
 #if (L2CAP_UCD_INCLUDED == TRUE)
@@ -934,7 +934,7 @@
     */
     if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE)
     {
-        while (p_ccb->xmit_hold_q.p_first)
+        while (GKI_getfirst(&p_ccb->xmit_hold_q))
         {
             p_buf2 = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
             l2cu_set_acl_hci_header (p_buf2, p_ccb);
@@ -1685,7 +1685,7 @@
     /* Stop the timer */
     btu_stop_timer (&p_ccb->timer_entry);
 
-    while (p_ccb->xmit_hold_q.p_first)
+    while (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
         GKI_freebuf (GKI_dequeue (&p_ccb->xmit_hold_q));
 
     l2c_fcr_cleanup (p_ccb);
@@ -3108,7 +3108,7 @@
             }
 
             L2CAP_TRACE_DEBUG("RR scan pri=%d, lcid=0x%04x, q_cout=%d",
-                                p_ccb->ccb_priority, p_ccb->local_cid, p_ccb->xmit_hold_q.count );
+                                p_ccb->ccb_priority, p_ccb->local_cid, GKI_queue_length(&p_ccb->xmit_hold_q));
 
             /* store the next serving channel */
             /* this channel is the last channel of its priority group */
@@ -3133,9 +3133,9 @@
                 if (p_ccb->fcrb.wait_ack || p_ccb->fcrb.remote_busy)
                     continue;
 
-                if ( p_ccb->fcrb.retrans_q.count == 0 )
+                if ( GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
                 {
-                    if ( p_ccb->xmit_hold_q.count == 0 )
+                    if ( GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                         continue;
 
                     /* If using the common pool, should be at least 10% free. */
@@ -3149,7 +3149,7 @@
             }
             else
             {
-                if (p_ccb->xmit_hold_q.count == 0)
+                if (GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                     continue;
             }
 
@@ -3259,9 +3259,9 @@
                 continue;
 
             /* No more checks needed if sending from the reatransmit queue */
-            if (p_ccb->fcrb.retrans_q.count == 0)
+            if (GKI_queue_is_empty(&p_ccb->fcrb.retrans_q))
             {
-                if (p_ccb->xmit_hold_q.count == 0)
+                if (GKI_queue_is_empty(&p_ccb->xmit_hold_q))
                     continue;
 
                 /* If using the common pool, should be at least 10% free. */
@@ -3282,7 +3282,7 @@
         }
         else
         {
-            if (p_ccb->xmit_hold_q.count != 0)
+            if (!GKI_queue_is_empty(&p_ccb->xmit_hold_q))
             {
                 p_buf = (BT_HDR *)GKI_dequeue (&p_ccb->xmit_hold_q);
                 if(NULL == p_buf)
@@ -3408,7 +3408,7 @@
 *******************************************************************************/
 void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
 {
-    UINT16 q_count = p_ccb->xmit_hold_q.count;
+    UINT16 q_count = GKI_queue_length(&p_ccb->xmit_hold_q);
 
 #if (L2CAP_UCD_INCLUDED == TRUE)
     if ( p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID )
diff --git a/stack/rfcomm/port_api.c b/stack/rfcomm/port_api.c
index 3643088..83c94da 100644
--- a/stack/rfcomm/port_api.c
+++ b/stack/rfcomm/port_api.c
@@ -1132,7 +1132,7 @@
     {
         PORT_SCHEDULE_LOCK;  /* to prevent missing credit */
 
-        count = p_port->rx.queue.count;
+        count = GKI_queue_length(&p_port->rx.queue);
 
         while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL)
             GKI_freebuf (p_buf);
@@ -1368,7 +1368,7 @@
                               (PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED)))
     {
         if ((p_port->tx.queue_size  > PORT_TX_CRITICAL_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_CRITICAL_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM))
         {
             RFCOMM_TRACE_WARNING ("PORT_Write: Queue size: %d",
                                    p_port->tx.queue_size);
@@ -1526,7 +1526,7 @@
     /* data fits into the end of the queue */
     PORT_SCHEDULE_LOCK;
 
-    if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL)
+    if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
      && (((int)p_buf->len + available) <= (int)p_port->peer_mtu)
      && (((int)p_buf->len + available) <= (int)length))
     {
@@ -1560,12 +1560,12 @@
     {
         /* if we're over buffer high water mark, we're done */
         if ((p_port->tx.queue_size  > PORT_TX_HIGH_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM))
         {
             port_flow_control_user(p_port);
             event |= PORT_EV_FC;
             debug("tx queue is full,tx.queue_size:%d,tx.queue.count:%d,available:%d",
-                    p_port->tx.queue_size, p_port->tx.queue.count, available);
+                    p_port->tx.queue_size, GKI_queue_length(&p_port->tx.queue), available);
             break;
          }
 
@@ -1677,7 +1677,7 @@
     /* data fits into the end of the queue */
     PORT_SCHEDULE_LOCK;
 
-    if (((p_buf = (BT_HDR *)p_port->tx.queue.p_last) != NULL)
+    if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
      && ((p_buf->len + max_len) <= p_port->peer_mtu)
      && ((p_buf->len + max_len) <= length))
     {
@@ -1698,7 +1698,7 @@
     {
         /* if we're over buffer high water mark, we're done */
         if ((p_port->tx.queue_size  > PORT_TX_HIGH_WM)
-         || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM))
+         || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM))
             break;
 
         /* continue with rfcomm data write */
diff --git a/stack/rfcomm/port_rfc.c b/stack/rfcomm/port_rfc.c
index 5704c01..3fa3013 100644
--- a/stack/rfcomm/port_rfc.c
+++ b/stack/rfcomm/port_rfc.c
@@ -865,7 +865,7 @@
 
     /* Check if rx queue exceeds the limit */
     if ((p_port->rx.queue_size + p_buf->len > PORT_RX_CRITICAL_WM)
-     || (p_port->rx.queue.count + 1 > p_port->rx_buf_critical))
+     || (GKI_queue_length(&p_port->rx.queue) + 1 > p_port->rx_buf_critical))
     {
         RFCOMM_TRACE_EVENT ("PORT_DataInd. Buffer over run. Dropping the buffer");
         GKI_freebuf (p_buf);
diff --git a/stack/rfcomm/port_utils.c b/stack/rfcomm/port_utils.c
index 642ddb8..7d29336 100644
--- a/stack/rfcomm/port_utils.c
+++ b/stack/rfcomm/port_utils.c
@@ -420,7 +420,7 @@
               || !p_port->rfc.p_mcb
               || !p_port->rfc.p_mcb->peer_ready
               || (p_port->tx.queue_size > PORT_TX_HIGH_WM)
-              || (p_port->tx.queue.count > PORT_TX_BUF_HIGH_WM);
+              || (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM);
 
     if (p_port->tx.user_fc == fc)
         return (0);
@@ -536,7 +536,7 @@
                 p_port->rx.peer_fc = TRUE;
             }
             /* if queue count reached credit rx max, set peer fc */
-            else if (p_port->rx.queue.count >= p_port->credit_rx_max)
+            else if (GKI_queue_length(&p_port->rx.queue) >= p_port->credit_rx_max)
             {
                 p_port->rx.peer_fc = TRUE;
             }
@@ -552,7 +552,7 @@
             /* check if it can be resumed now */
             if (p_port->rx.peer_fc
              && (p_port->rx.queue_size < PORT_RX_LOW_WM)
-             && (p_port->rx.queue.count < PORT_RX_BUF_LOW_WM))
+             && (GKI_queue_length(&p_port->rx.queue) < PORT_RX_BUF_LOW_WM))
             {
                 p_port->rx.peer_fc = FALSE;
 
@@ -573,7 +573,7 @@
             /* Check the size of the rx queue.  If it exceeds certain */
             /* level and flow control has not been sent to the peer do it now */
             else if ( ((p_port->rx.queue_size > PORT_RX_HIGH_WM)
-                     || (p_port->rx.queue.count > PORT_RX_BUF_HIGH_WM))
+                     || (GKI_queue_length(&p_port->rx.queue) > PORT_RX_BUF_HIGH_WM))
                      && !p_port->rx.peer_fc)
             {
                 RFCOMM_TRACE_EVENT ("PORT_DataInd Data reached HW. Sending FC set.");
diff --git a/stack/rfcomm/rfc_port_fsm.c b/stack/rfcomm/rfc_port_fsm.c
index a998b6e..c0d7fbc 100644
--- a/stack/rfcomm/rfc_port_fsm.c
+++ b/stack/rfcomm/rfc_port_fsm.c
@@ -431,7 +431,7 @@
     case RFC_EVENT_DISC:
         p_port->rfc.state = RFC_STATE_CLOSED;
         rfc_send_ua (p_port->rfc.p_mcb, p_port->dlci);
-        if(p_port->rx.queue.count)
+        if(!GKI_queue_is_empty(&p_port->rx.queue))
         {
             /* give a chance to upper stack to close port properly */
             RFCOMM_TRACE_DEBUG("port queue is not empty");