diff options
author | David Cross <david.cross@cypress.com> | 2010-09-23 17:24:45 -0700 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-09-29 18:28:26 -0700 |
commit | 0769c38d26bdb2941385c32ff66a25beeb987f75 (patch) | |
tree | 9bf7d7801a2df098dc3b1aadfe562ce8fc5bb776 | |
parent | 9ebed608bde8e27a1993f1b12eff16e04c8c419e (diff) |
Staging: west bridge, removal of " " before ";"
This patch removes all of the " ;"'s in the west bridge driver
and instead replaces them with ";" only. Although this is a large patch,
this is the only thing that it does. I can break it up on a file basis
if needed.
Signed-off-by: David Cross <david.cross@cypress.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
38 files changed, 5753 insertions, 5753 deletions
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasdma.c b/drivers/staging/westbridge/astoria/api/src/cyasdma.c index 152591d93ec2..de67e1310503 100644 --- a/drivers/staging/westbridge/astoria/api/src/cyasdma.c +++ b/drivers/staging/westbridge/astoria/api/src/cyasdma.c @@ -32,13 +32,13 @@ static void cy_as_dma_add_request_to_free_queue(cy_as_device *dev_p, cy_as_dma_queue_entry *req_p) { - uint32_t imask ; - imask = cy_as_hal_disable_interrupts() ; + uint32_t imask; + imask = cy_as_hal_disable_interrupts(); - req_p->next_p = dev_p->dma_freelist_p ; - dev_p->dma_freelist_p = req_p ; + req_p->next_p = dev_p->dma_freelist_p; + dev_p->dma_freelist_p = req_p; - cy_as_hal_enable_interrupts(imask) ; + cy_as_hal_enable_interrupts(imask); } /* @@ -47,17 +47,17 @@ cy_as_dma_add_request_to_free_queue(cy_as_device *dev_p, static cy_as_dma_queue_entry * cy_as_dma_get_dma_queue_entry(cy_as_device *dev_p) { - cy_as_dma_queue_entry *req_p ; - uint32_t imask ; + cy_as_dma_queue_entry *req_p; + uint32_t imask; - cy_as_hal_assert(dev_p->dma_freelist_p != 0) ; + cy_as_hal_assert(dev_p->dma_freelist_p != 0); - imask = cy_as_hal_disable_interrupts() ; - req_p = dev_p->dma_freelist_p ; - dev_p->dma_freelist_p = req_p->next_p ; - cy_as_hal_enable_interrupts(imask) ; + imask = cy_as_hal_disable_interrupts(); + req_p = dev_p->dma_freelist_p; + dev_p->dma_freelist_p = req_p->next_p; + cy_as_hal_enable_interrupts(imask); - return req_p ; + return req_p; } /* @@ -74,11 +74,11 @@ cy_as_dma_set_max_dma_size(cy_as_device *dev_p, /* In MTP mode, EP2 is allowed to have all max sizes. 
*/ if ((!dev_p->is_mtp_firmware) || (ep != 0x02)) { if (size < 64 || size > 1024) - return CY_AS_ERROR_INVALID_SIZE ; + return CY_AS_ERROR_INVALID_SIZE; } - CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size ; - return CY_AS_ERROR_SUCCESS ; + CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size; + return CY_AS_ERROR_SUCCESS; } /* @@ -96,13 +96,13 @@ cy_as_dma_request_callback( cy_as_ll_request_response *resp_p, cy_as_return_status_t ret) { - uint16_t v ; - uint16_t datacnt ; - cy_as_end_point_number_t ep ; + uint16_t v; + uint16_t datacnt; + cy_as_end_point_number_t ep; - (void)context ; + (void)context; - cy_as_log_debug_message(5, "cy_as_dma_request_callback called") ; + cy_as_log_debug_message(5, "cy_as_dma_request_callback called"); /* * extract the return code from the firmware @@ -110,17 +110,17 @@ cy_as_dma_request_callback( if (ret == CY_AS_ERROR_SUCCESS) { if (cy_as_ll_request_response__get_code(resp_p) != CY_RESP_SUCCESS_FAILURE) - ret = CY_AS_ERROR_INVALID_RESPONSE ; + ret = CY_AS_ERROR_INVALID_RESPONSE; else - ret = cy_as_ll_request_response__get_word(resp_p, 0) ; + ret = cy_as_ll_request_response__get_word(resp_p, 0); } /* * extract the endpoint number and the transferred byte count * from the request. */ - v = cy_as_ll_request_response__get_word(req_p, 0) ; - ep = (cy_as_end_point_number_t)((v >> 13) & 0x01) ; + v = cy_as_ll_request_response__get_word(req_p, 0); + ep = (cy_as_end_point_number_t)((v >> 13) & 0x01); if (ret == CY_AS_ERROR_SUCCESS) { /* @@ -129,12 +129,12 @@ cy_as_dma_request_callback( * transferred. there are no partial * transfers. */ - datacnt = v & 0x3FF ; + datacnt = v & 0x3FF; } else { /* * if the firmware returned an error, no data was transferred. */ - datacnt = 0 ; + datacnt = 0; } /* @@ -142,11 +142,11 @@ cy_as_dma_request_callback( * next EP0 or EP1 request. 
*/ if (ep == 0) { - dev_p->usb_ep0_dma_req = req_p ; - dev_p->usb_ep0_dma_resp = resp_p ; + dev_p->usb_ep0_dma_req = req_p; + dev_p->usb_ep0_dma_resp = resp_p; } else { - dev_p->usb_ep1_dma_req = req_p ; - dev_p->usb_ep1_dma_resp = resp_p ; + dev_p->usb_ep1_dma_req = req_p; + dev_p->usb_ep1_dma_resp = resp_p; } /* @@ -159,7 +159,7 @@ cy_as_dma_request_callback( * function. */ if (ret != CY_AS_ERROR_CANCELED) - cy_as_dma_completed_callback(dev_p->tag, ep, datacnt, ret) ; + cy_as_dma_completed_callback(dev_p->tag, ep, datacnt, ret); } /* @@ -171,35 +171,35 @@ static void cy_as_dma_set_drq(cy_as_device *dev_p, cy_as_end_point_number_t ep, cy_bool state) { - uint16_t mask ; - uint16_t v ; - uint32_t intval ; + uint16_t mask; + uint16_t v; + uint32_t intval; /* * there are not DRQ register bits for EP0 and EP1 */ if (ep == 0 || ep == 1) - return ; + return; /* * disable interrupts while we do this to be sure the state of the * DRQ mask register is always well defined. */ - intval = cy_as_hal_disable_interrupts() ; + intval = cy_as_hal_disable_interrupts(); /* * set the DRQ bit to the given state for the ep given */ - mask = (1 << ep) ; - v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK) ; + mask = (1 << ep); + v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK); if (state) - v |= mask ; + v |= mask; else - v &= ~mask ; + v &= ~mask; - cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v) ; - cy_as_hal_enable_interrupts(intval) ; + cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v); + cy_as_hal_enable_interrupts(intval); } /* @@ -208,14 +208,14 @@ cy_as_dma_set_drq(cy_as_device *dev_p, static void cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) { - uint32_t datacnt ; - void *buf_p ; - cy_as_dma_queue_entry *dma_p ; + uint32_t datacnt; + void *buf_p; + cy_as_dma_queue_entry *dma_p; - cy_as_log_debug_message(6, "cy_as_dma_send_next_dma_request called") ; + cy_as_log_debug_message(6, 
"cy_as_dma_send_next_dma_request called"); /* If the queue is empty, nothing to do */ - dma_p = ep_p->queue_p ; + dma_p = ep_p->queue_p; if (dma_p == 0) { /* * there are no pending DMA requests @@ -224,18 +224,18 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) * will be triggered by this endpoint * until someone is interested in the data. */ - cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false) ; - return ; + cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false); + return; } - cy_as_dma_end_point_set_running(ep_p) ; + cy_as_dma_end_point_set_running(ep_p); /* * get the number of words that still * need to be xferred in this request. */ - datacnt = dma_p->size - dma_p->offset ; - cy_as_hal_assert(datacnt >= 0) ; + datacnt = dma_p->size - dma_p->offset; + cy_as_hal_assert(datacnt >= 0); /* * the HAL layer should never limit the size @@ -244,7 +244,7 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) * in packets that are not correct in size. */ cy_as_hal_assert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE - || ep_p->maxhaldata >= ep_p->maxhwdata) ; + || ep_p->maxhaldata >= ep_p->maxhwdata); /* * update the number of words that need to be xferred yet @@ -252,10 +252,10 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) */ if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE) { if (datacnt > ep_p->maxhwdata) - datacnt = ep_p->maxhwdata ; + datacnt = ep_p->maxhwdata; } else { if (datacnt > ep_p->maxhaldata) - datacnt = ep_p->maxhaldata ; + datacnt = ep_p->maxhaldata; } /* @@ -266,7 +266,7 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) /* * mark a request in transit */ - cy_as_dma_end_point_set_in_transit(ep_p) ; + cy_as_dma_end_point_set_in_transit(ep_p); if (ep_p->ep == 0 || ep_p->ep == 1) { /* @@ -280,62 +280,62 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) * the data back into the DMA module. 
*/ if (dma_p->readreq == cy_false) { - uint16_t v ; - uint16_t len ; - cy_as_ll_request_response *resp_p ; - cy_as_ll_request_response *req_p ; - cy_as_return_status_t ret ; + uint16_t v; + uint16_t len; + cy_as_ll_request_response *resp_p; + cy_as_ll_request_response *req_p; + cy_as_return_status_t ret; - len = (uint16_t)(datacnt / 2) ; + len = (uint16_t)(datacnt / 2); if (datacnt % 2) - len++ ; + len++; - len++ ; + len++; if (ep_p->ep == 0) { - req_p = dev_p->usb_ep0_dma_req ; - resp_p = dev_p->usb_ep0_dma_resp ; - dev_p->usb_ep0_dma_req = 0 ; - dev_p->usb_ep0_dma_resp = 0 ; + req_p = dev_p->usb_ep0_dma_req; + resp_p = dev_p->usb_ep0_dma_resp; + dev_p->usb_ep0_dma_req = 0; + dev_p->usb_ep0_dma_resp = 0; } else { - req_p = dev_p->usb_ep1_dma_req ; - resp_p = dev_p->usb_ep1_dma_resp ; - dev_p->usb_ep1_dma_req = 0 ; - dev_p->usb_ep1_dma_resp = 0 ; + req_p = dev_p->usb_ep1_dma_req; + resp_p = dev_p->usb_ep1_dma_resp; + dev_p->usb_ep1_dma_req = 0; + dev_p->usb_ep1_dma_resp = 0; } - cy_as_hal_assert(req_p != 0) ; - cy_as_hal_assert(resp_p != 0) ; - cy_as_hal_assert(len <= 64) ; + cy_as_hal_assert(req_p != 0); + cy_as_hal_assert(resp_p != 0); + cy_as_hal_assert(len <= 64); cy_as_ll_init_request(req_p, CY_RQT_USB_EP_DATA, - CY_RQT_USB_RQT_CONTEXT, len) ; + CY_RQT_USB_RQT_CONTEXT, len); - v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14)) ; + v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14)); if (dma_p->offset == 0) - v |= (1 << 12) ;/* Set the first packet bit */ + v |= (1 << 12);/* Set the first packet bit */ if (dma_p->offset + datacnt == dma_p->size) - v |= (1 << 11) ;/* Set the last packet bit */ + v |= (1 << 11);/* Set the last packet bit */ - cy_as_ll_request_response__set_word(req_p, 0, v) ; + cy_as_ll_request_response__set_word(req_p, 0, v); cy_as_ll_request_response__pack(req_p, - 1, datacnt, buf_p) ; + 1, datacnt, buf_p); - cy_as_ll_init_response(resp_p, 1) ; + cy_as_ll_init_response(resp_p, 1); ret = cy_as_ll_send_request(dev_p, req_p, resp_p, - 
cy_false, cy_as_dma_request_callback) ; + cy_false, cy_as_dma_request_callback); if (ret == CY_AS_ERROR_SUCCESS) cy_as_log_debug_message(5, - "+++ send EP 0/1 data via mailbox registers") ; + "+++ send EP 0/1 data via mailbox registers"); else cy_as_log_debug_message(5, "+++ error sending EP 0/1 data via mailbox " - "registers - CY_AS_ERROR_TIMEOUT") ; + "registers - CY_AS_ERROR_TIMEOUT"); if (ret != CY_AS_ERROR_SUCCESS) cy_as_dma_completed_callback(dev_p->tag, - ep_p->ep, 0, ret) ; + ep_p->ep, 0, ret); } } else { /* @@ -353,16 +353,16 @@ cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p) */ if (cy_as_dma_end_point_is_direction_in(ep_p)) cy_as_hal_dma_setup_write(dev_p->tag, - ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ; + ep_p->ep, buf_p, datacnt, ep_p->maxhwdata); else cy_as_hal_dma_setup_read(dev_p->tag, - ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ; + ep_p->ep, buf_p, datacnt, ep_p->maxhwdata); /* * the DRQ interrupt for this endpoint should be enabled * so that the data transfer progresses at interrupt time. 
*/ - cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true) ; + cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true); } } @@ -377,27 +377,27 @@ void cy_as_dma_completed_callback(cy_as_hal_device_tag tag, cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status) { - uint32_t mask ; - cy_as_dma_queue_entry *req_p ; - cy_as_dma_end_point *ep_p ; - cy_as_device *dev_p = cy_as_device_find_from_tag(tag) ; + uint32_t mask; + cy_as_dma_queue_entry *req_p; + cy_as_dma_end_point *ep_p; + cy_as_device *dev_p = cy_as_device_find_from_tag(tag); /* Make sure the HAL layer gave us good parameters */ - cy_as_hal_assert(dev_p != 0) ; - cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ; - cy_as_hal_assert(ep < 16) ; + cy_as_hal_assert(dev_p != 0); + cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE); + cy_as_hal_assert(ep < 16); /* Get the endpoint ptr */ - ep_p = CY_AS_NUM_EP(dev_p, ep) ; - cy_as_hal_assert(ep_p->queue_p != 0) ; + ep_p = CY_AS_NUM_EP(dev_p, ep); + cy_as_hal_assert(ep_p->queue_p != 0); /* Get a pointer to the current entry in the queue */ - mask = cy_as_hal_disable_interrupts() ; - req_p = ep_p->queue_p ; + mask = cy_as_hal_disable_interrupts(); + req_p = ep_p->queue_p; /* Update the offset to reflect the data actually received or sent */ - req_p->offset += cnt ; + req_p->offset += cnt; /* * if we are still sending/receiving the current packet, @@ -422,7 +422,7 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, ((ep != CY_AS_MTP_READ_ENDPOINT) || (cnt == dev_p->usb_max_tx_size))))) || ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) { - cy_as_hal_enable_interrupts(mask) ; + cy_as_hal_enable_interrupts(mask); /* * and send the request again to send the next block of @@ -433,9 +433,9 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || ( (ep == CY_AS_MTP_READ_ENDPOINT) && (!cy_as_dma_end_point_is_direction_in(ep_p)))) - cy_as_dma_end_point_set_stopped(ep_p) ; + cy_as_dma_end_point_set_stopped(ep_p); 
else - cy_as_dma_send_next_dma_request(dev_p, ep_p) ; + cy_as_dma_send_next_dma_request(dev_p, ep_p); } else { /* * we get here if ... @@ -449,18 +449,18 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, /* * remove this entry from the DMA queue for this endpoint. */ - cy_as_dma_end_point_clear_in_transit(ep_p) ; - ep_p->queue_p = req_p->next_p ; + cy_as_dma_end_point_clear_in_transit(ep_p); + ep_p->queue_p = req_p->next_p; if (ep_p->last_p == req_p) { /* * we have removed the last packet from the DMA queue, * disable the interrupt associated with this interrupt. */ - ep_p->last_p = 0 ; - cy_as_hal_enable_interrupts(mask) ; - cy_as_dma_set_drq(dev_p, ep, cy_false) ; + ep_p->last_p = 0; + cy_as_hal_enable_interrupts(mask); + cy_as_dma_set_drq(dev_p, ep, cy_false); } else - cy_as_hal_enable_interrupts(mask) ; + cy_as_hal_enable_interrupts(mask); if (req_p->cb) { /* @@ -472,32 +472,32 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, * cannot recursively call an API function that is * synchronous only from a callback. */ - cy_as_device_set_in_callback(dev_p) ; + cy_as_device_set_in_callback(dev_p); (*req_p->cb)(dev_p, ep, req_p->buf_p, - req_p->offset, status) ; - cy_as_device_clear_in_callback(dev_p) ; + req_p->offset, status); + cy_as_device_clear_in_callback(dev_p); } /* * we are done with this request, put it on the freelist to be * reused at a later time. */ - cy_as_dma_add_request_to_free_queue(dev_p, req_p) ; + cy_as_dma_add_request_to_free_queue(dev_p, req_p); if (ep_p->queue_p == 0) { /* * if the endpoint is out of DMA entries, set the * endpoint as stopped. */ - cy_as_dma_end_point_set_stopped(ep_p) ; + cy_as_dma_end_point_set_stopped(ep_p); /* * the DMA queue is empty, wake any task waiting on * the QUEUE to drain. 
*/ if (cy_as_dma_end_point_is_sleeping(ep_p)) { - cy_as_dma_end_point_set_wake_state(ep_p) ; - cy_as_hal_wake(&ep_p->channel) ; + cy_as_dma_end_point_set_wake_state(ep_p); + cy_as_hal_wake(&ep_p->channel); } } else { /* @@ -512,9 +512,9 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, (!cy_as_device_is_p2s_dma_start_recvd(dev_p))) || ((ep == dev_p->storage_write_endpoint) && (!cy_as_device_is_p2s_dma_start_recvd(dev_p)))) - cy_as_dma_end_point_set_stopped(ep_p) ; + cy_as_dma_end_point_set_stopped(ep_p); else - cy_as_dma_send_next_dma_request(dev_p, ep_p) ; + cy_as_dma_send_next_dma_request(dev_p, ep_p); } } } @@ -529,17 +529,17 @@ cy_as_dma_completed_callback(cy_as_hal_device_tag tag, cy_as_return_status_t cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep) { - cy_as_dma_end_point *ep_p ; - cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ; + cy_as_dma_end_point *ep_p; + cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE); - ep_p = CY_AS_NUM_EP(dev_p, ep) ; + ep_p = CY_AS_NUM_EP(dev_p, ep); /* We are already running */ if (cy_as_dma_end_point_is_running(ep_p)) - return CY_AS_ERROR_SUCCESS ; + return CY_AS_ERROR_SUCCESS; cy_as_dma_send_next_dma_request(dev_p, ep_p); - return CY_AS_ERROR_SUCCESS ; + return CY_AS_ERROR_SUCCESS; } /* @@ -550,37 +550,37 @@ cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep) static cy_as_return_status_t cy_as_dma_stop_end_point(cy_as_device *dev_p, cy_as_end_point_number_t ep) { - cy_as_return_status_t ret ; - cy_as_dma_end_point *ep_p = CY_AS_NUM_EP(dev_p, ep) ; + cy_as_return_status_t ret; + cy_as_dma_end_point *ep_p = CY_AS_NUM_EP(dev_p, ep); /* * cancel any pending DMA requests associated with this endpoint. this * cancels any DMA requests at the HAL layer as well as dequeues any * request that is currently pending. 
*/ - ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED) ; + ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED); if (ret != CY_AS_ERROR_SUCCESS) - return ret ; + return ret; /* * destroy the sleep channel */ if (!cy_as_hal_destroy_sleep_channel(&ep_p->channel) && ret == CY_AS_ERROR_SUCCESS) - ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED ; + ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED; /* * free the memory associated with this endpoint */ - cy_as_hal_free(ep_p) ; + cy_as_hal_free(ep_p); /* * set the data structure ptr to something sane since the * previous pointer is now free. */ - dev_p->endp[ep] = 0 ; + dev_p->endp[ep] = 0; - return ret ; + return ret; } /* @@ -591,18 +591,18 @@ cy_as_dma_stop_end_point(cy_as_device *dev_p, cy_as_end_point_number_t ep) static cy_as_return_status_t cy_as_dma_stop_internal(cy_as_device *dev_p) { - cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS ; - cy_as_return_status_t lret ; - cy_as_end_point_number_t i ; + cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; + cy_as_return_status_t lret; + cy_as_end_point_number_t i; /* * stop all of the endpoints. this cancels all DMA requests, and * frees all resources associated with each endpoint. */ - for (i = 0 ; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])) ; i++) { - lret = cy_as_dma_stop_end_point(dev_p, i) ; + for (i = 0; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])); i++) { + lret = cy_as_dma_stop_end_point(dev_p, i); if (lret != CY_AS_ERROR_SUCCESS && ret == CY_AS_ERROR_SUCCESS) - ret = lret ; + ret = lret; } /* @@ -610,23 +610,23 @@ cy_as_dma_stop_internal(cy_as_device *dev_p) * DMA requests. 
*/ while (dev_p->dma_freelist_p) { - cy_as_dma_queue_entry *req_p ; - uint32_t imask = cy_as_hal_disable_interrupts() ; + cy_as_dma_queue_entry *req_p; + uint32_t imask = cy_as_hal_disable_interrupts(); - req_p = dev_p->dma_freelist_p ; - dev_p->dma_freelist_p = req_p->next_p ; + req_p = dev_p->dma_freelist_p; + dev_p->dma_freelist_p = req_p->next_p; - cy_as_hal_enable_interrupts(imask) ; + cy_as_hal_enable_interrupts(imask); - cy_as_hal_free(req_p) ; + cy_as_hal_free(req_p); } - cy_as_ll_destroy_request(dev_p, dev_p->usb_ep0_dma_req) ; - cy_as_ll_destroy_request(dev_p, dev_p->usb_ep1_dma_req) ; - cy_as_ll_destroy_response(dev_p, dev_p->usb_ep0_dma_resp) ; - cy_as_ll_destroy_response(dev_p, dev_p->usb_ep1_dma_resp) ; + cy_as_ll_destroy_request(dev_p, dev_p->usb_ep0_dma_req); + cy_as_ll_destroy_request(dev_p, dev_p->usb_ep1_dma_req); + cy_as_ll_destroy_response(dev_p, dev_p->usb_ep0_dma_resp); + cy_as_ll_destroy_response(dev_p, dev_p->usb_ep1_dma_resp); - return ret ; + return ret; } @@ -642,12 +642,12 @@ cy_as_dma_stop_internal(cy_as_device *dev_p) cy_as_return_status_t cy_as_dma_stop(cy_as_device *dev_p) { - cy_as_return_status_t ret ; + cy_as_return_status_t ret; - ret = cy_as_dma_stop_internal(dev_p) ; - cy_as_device_set_dma_stopped(dev_p) ; + ret = cy_as_dma_stop_internal(dev_p); + cy_as_device_set_dma_stopped(dev_p); - return ret ; + return ret; } /* @@ -658,23 +658,23 @@ cy_as_dma_stop(cy_as_device *dev_p) cy_as_return_status_t cy_as_dma_start(cy_as_device *dev_p) { - cy_as_end_point_number_t i ; - uint16_t cnt ; + cy_as_end_point_number_t i; + uint16_t cnt; if (cy_as_device_is_dma_running(dev_p)) - return CY_AS_ERROR_ALREADY_RUNNING ; + return CY_AS_ERROR_ALREADY_RUNNING; /* * pre-allocate DMA queue structures to be used in the interrupt context */ - for (cnt = 0 ; cnt < 32 ; cnt++) { + for (cnt = 0; cnt < 32; cnt++) { cy_as_dma_queue_entry *entry_p = (cy_as_dma_queue_entry *) - cy_as_hal_alloc(sizeof(cy_as_dma_queue_entry)) ; + 
cy_as_hal_alloc(sizeof(cy_as_dma_queue_entry)); if (entry_p == 0) { - cy_as_dma_stop_internal(dev_p) ; - return CY_AS_ERROR_OUT_OF_MEMORY ; + cy_as_dma_stop_internal(dev_p); + return CY_AS_ERROR_OUT_OF_MEMORY; } - cy_as_dma_add_request_to_free_queue(dev_p, entry_p) ; + cy_as_dma_add_request_to_free_queue(dev_p, entry_p); } /* @@ -682,52 +682,52 @@ cy_as_dma_start(cy_as_device *dev_p) * and EP1 data to west bridge */ dev_p->usb_ep0_dma_req = cy_as_ll_create_request(dev_p, - CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64) ; + CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64); dev_p->usb_ep1_dma_req = cy_as_ll_create_request(dev_p, - CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64) ; + CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64); if (dev_p->usb_ep0_dma_req == 0 || dev_p->usb_ep1_dma_req == 0) { - cy_as_dma_stop_internal(dev_p) ; - return CY_AS_ERROR_OUT_OF_MEMORY ; + cy_as_dma_stop_internal(dev_p); + return CY_AS_ERROR_OUT_OF_MEMORY; } - dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req ; + dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req; - dev_p->usb_ep0_dma_resp = cy_as_ll_create_response(dev_p, 1) ; - dev_p->usb_ep1_dma_resp = cy_as_ll_create_response(dev_p, 1) ; + dev_p->usb_ep0_dma_resp = cy_as_ll_create_response(dev_p, 1); + dev_p->usb_ep1_dma_resp = cy_as_ll_create_response(dev_p, 1); if (dev_p->usb_ep0_dma_resp == 0 || dev_p->usb_ep1_dma_resp == 0) { - cy_as_dma_stop_internal(dev_p) ; - return CY_AS_ERROR_OUT_OF_MEMORY ; + cy_as_dma_stop_internal(dev_p); + return CY_AS_ERROR_OUT_OF_MEMORY; } - dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp ; + dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp; /* * set the dev_p->endp to all zeros to insure cleanup is possible if * an error occurs during initialization. */ - cy_as_hal_mem_set(dev_p->endp, 0, sizeof(dev_p->endp)) ; + cy_as_hal_mem_set(dev_p->endp, 0, sizeof(dev_p->endp)); /* * now, iterate through each of the endpoints and initialize each * one. 
*/ - for (i = 0 ; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]) ; i++) { + for (i = 0; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]); i++) { dev_p->endp[i] = (cy_as_dma_end_point *) - cy_as_hal_alloc(sizeof(cy_as_dma_end_point)) ; + cy_as_hal_alloc(sizeof(cy_as_dma_end_point)); if (dev_p->endp[i] == 0) { - cy_as_dma_stop_internal(dev_p) ; - return CY_AS_ERROR_OUT_OF_MEMORY ; + cy_as_dma_stop_internal(dev_p); + return CY_AS_ERROR_OUT_OF_MEMORY; } cy_as_hal_mem_set(dev_p->endp[i], 0, - sizeof(cy_as_dma_end_point)) ; + sizeof(cy_as_dma_end_point)); |