arch/sh/include/asm/cacheflush.h +7 −0

@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);
 
 void cpu_cache_init(void);
 
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+        if (__in_29bit_mode())
+                vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+        return vaddr;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
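
For context, a minimal sketch of how a caller uses the new helper before a cache maintenance operation: on 29-bit parts the cache routines must be handed the cached (P1) alias, so an uncached address is translated first. The wrapper below is hypothetical; only sh_cacheop_vaddr() and __flush_wback_region() are real kernel symbols.

/* Sketch only, not part of the patch: hypothetical writeback wrapper. */
static void example_writeback_for_device(void *vaddr, size_t size)
{
        /* In 29-bit mode this returns the cached (P1) alias of vaddr. */
        void *addr = sh_cacheop_vaddr(vaddr);

        /* Write dirty cache lines back to memory before the device reads. */
        __flush_wback_region(addr, size);
}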
arch/sh/mm/consistent.c +1 −5

@@ -74,10 +74,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void sh_sync_dma_for_device(void *vaddr, size_t size,
                             enum dma_data_direction direction)
 {
-        void *addr;
-
-        addr = __in_29bit_mode() ?
-               (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
+        void *addr = sh_cacheop_vaddr(vaddr);
 
         switch (direction) {
         case DMA_FROM_DEVICE:        /* invalidate only */
@@ -93,7 +90,6 @@ void sh_sync_dma_for_device(void *vaddr, size_t size,
                 BUG();
         }
 }
-EXPORT_SYMBOL(sh_sync_dma_for_device);
 
 static int __init memchunk_setup(char *str)
 {
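
With the helper in place, the open-coded 29-bit check disappears and the function reduces to a direction switch over the low-level flush primitives. A rough reconstruction of the resulting sh_sync_dma_for_device(), assembled from the hunk context above; the DMA_TO_DEVICE and DMA_BIDIRECTIONAL arms are inferred rather than shown in the hunks and may differ slightly in the tree this patch targets:

void sh_sync_dma_for_device(void *vaddr, size_t size,
                            enum dma_data_direction direction)
{
        void *addr = sh_cacheop_vaddr(vaddr);

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                __flush_invalidate_region(addr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                __flush_wback_region(addr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                __flush_purge_region(addr, size);
                break;
        default:
                BUG();
        }
}

With EXPORT_SYMBOL() gone, the function is no longer usable from modules, which lines up with the maple driver below being switched to the flush primitives directly.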
drivers/sh/maple/maple.c +4 −3

@@ -300,8 +300,8 @@ static void maple_send(void)
         mutex_unlock(&maple_wlist_lock);
         if (maple_packets > 0) {
                 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-                        sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+                        __flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+                                             PAGE_SIZE);
         }
 
 finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
         list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                 mdev = mq->dev;
                 recvbuf = mq->recvbuf->buf;
-                sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+                __flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+                                          0x400);
                 code = recvbuf[0];
                 kfree(mq->sendbuf);
                 list_del_init(&mq->list);
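
Note on the substitution: the DMA_BIDIRECTIONAL sync was performed as a writeback plus invalidate, which is what __flush_purge_region() does on the send buffer, while the DMA_FROM_DEVICE sync was an invalidate only (see the /* invalidate only */ comment in consistent.c above), matching __flush_invalidate_region() on the receive buffer. Only the receive-buffer address goes through sh_cacheop_vaddr(); maple_sendbuf is passed as-is.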