diff --git a/[refs] b/[refs] index e916bdf497fa..b1c4bc2fe750 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a068682cd69461911407411b4198248a87c583e2 +refs/heads/master: 63fe23c34e1c18725bd1459897bf3a46335d88b7 diff --git a/trunk/arch/arm/mach-spear3xx/spear300.c b/trunk/arch/arm/mach-spear3xx/spear300.c index 6ec300549960..0f882ecb7d81 100644 --- a/trunk/arch/arm/mach-spear3xx/spear300.c +++ b/trunk/arch/arm/mach-spear3xx/spear300.c @@ -120,156 +120,182 @@ struct pl08x_channel_data spear300_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear310.c 
b/trunk/arch/arm/mach-spear3xx/spear310.c index 1d0e435b9045..bbcf4571d361 100644 --- a/trunk/arch/arm/mach-spear3xx/spear310.c +++ b/trunk/arch/arm/mach-spear3xx/spear310.c @@ -205,156 +205,182 @@ struct pl08x_channel_data spear310_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear320.c b/trunk/arch/arm/mach-spear3xx/spear320.c index fd823c624575..88d483bcd66a 100644 --- a/trunk/arch/arm/mach-spear3xx/spear320.c +++ b/trunk/arch/arm/mach-spear3xx/spear320.c @@ -213,156 +213,182 @@ struct pl08x_channel_data spear320_dma_info[] = { .min_signal = 2, .max_signal = 2, 
.muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB2, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear3xx.c b/trunk/arch/arm/mach-spear3xx/spear3xx.c index d6cd8403fe66..0f41bd1c47c3 100644 --- a/trunk/arch/arm/mach-spear3xx/spear3xx.c +++ b/trunk/arch/arm/mach-spear3xx/spear3xx.c @@ -46,8 +46,7 @@ struct pl022_ssp_controller pl022_plat_data = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl_memcpy = - (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ 
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/trunk/arch/arm/mach-spear6xx/spear6xx.c b/trunk/arch/arm/mach-spear6xx/spear6xx.c index b59ae5369e7b..2e2e3596583e 100644 --- a/trunk/arch/arm/mach-spear6xx/spear6xx.c +++ b/trunk/arch/arm/mach-spear6xx/spear6xx.c @@ -36,288 +36,336 @@ static struct pl08x_channel_data spear600_dma_info[] = { .min_signal = 0, .max_signal = 0, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp2_rx", .min_signal = 6, .max_signal = 6, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 7, .max_signal = 7, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = 
"ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, + .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ext0_rx", .min_signal = 0, .max_signal = 0, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext0_tx", .min_signal = 1, .max_signal = 1, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_rx", .min_signal = 2, .max_signal = 2, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_tx", .min_signal = 3, .max_signal = 3, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_rx", .min_signal = 4, .max_signal = 4, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_tx", .min_signal = 5, .max_signal = 5, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_rx", .min_signal = 6, .max_signal = 6, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_tx", .min_signal = 7, .max_signal = 7, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_rx", .min_signal = 8, .max_signal = 8, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_tx", .min_signal = 9, .max_signal = 9, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_rx", .min_signal = 10, .max_signal = 10, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_tx", .min_signal = 11, .max_signal = 11, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_rx", .min_signal = 12, .max_signal = 12, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_tx", .min_signal = 13, .max_signal = 13, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_rx", .min_signal = 14, .max_signal = 14, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_tx", .min_signal = 15, .max_signal = 15, .muxval = 2, + .cctl = 0, .periph_buses = PL08X_AHB2, }, }; @@ -325,8 +373,7 @@ static struct pl08x_channel_data spear600_dma_info[] = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl_memcpy = - (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/trunk/arch/arm/plat-spear/include/plat/pl080.h b/trunk/arch/arm/plat-spear/include/plat/pl080.h index eb6590ded40d..2bc6b54460a8 100644 --- a/trunk/arch/arm/plat-spear/include/plat/pl080.h +++ b/trunk/arch/arm/plat-spear/include/plat/pl080.h @@ -14,8 +14,8 @@ #ifndef __PLAT_PL080_H #define __PLAT_PL080_H -struct pl08x_channel_data; -int pl080_get_signal(const struct pl08x_channel_data *cd); -void pl080_put_signal(const struct pl08x_channel_data *cd, int signal); +struct pl08x_dma_chan; +int pl080_get_signal(struct pl08x_dma_chan *ch); +void pl080_put_signal(struct pl08x_dma_chan *ch); #endif /* __PLAT_PL080_H */ diff --git a/trunk/arch/arm/plat-spear/pl080.c b/trunk/arch/arm/plat-spear/pl080.c index cfa1199d0f4a..12cf27f935f9 100644 --- a/trunk/arch/arm/plat-spear/pl080.c +++ 
b/trunk/arch/arm/plat-spear/pl080.c @@ -27,8 +27,9 @@ struct { unsigned char val; } signals[16] = {{0, 0}, }; -int pl080_get_signal(const struct pl08x_channel_data *cd) +int pl080_get_signal(struct pl08x_dma_chan *ch) { + const struct pl08x_channel_data *cd = ch->cd; unsigned int signal = cd->min_signal, val; unsigned long flags; @@ -62,17 +63,18 @@ int pl080_get_signal(const struct pl08x_channel_data *cd) return signal; } -void pl080_put_signal(const struct pl08x_channel_data *cd, int signal) +void pl080_put_signal(struct pl08x_dma_chan *ch) { + const struct pl08x_channel_data *cd = ch->cd; unsigned long flags; spin_lock_irqsave(&lock, flags); /* if signal is not used */ - if (!signals[signal].busy) + if (!signals[cd->min_signal].busy) BUG(); - signals[signal].busy--; + signals[cd->min_signal].busy--; spin_unlock_irqrestore(&lock, flags); } diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index be0dc3b7c409..eb2b60e8e1cb 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -53,7 +53,6 @@ config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA && EXPERIMENTAL select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS help Platform has a PL08x DMAC device which can provide DMA engine support diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c index 6fbeebb9486f..49ecbbb8932d 100644 --- a/trunk/drivers/dma/amba-pl08x.c +++ b/trunk/drivers/dma/amba-pl08x.c @@ -86,12 +86,10 @@ #include #include "dmaengine.h" -#include "virt-dma.h" #define DRIVER_NAME "pl08xdmac" static struct amba_driver pl08x_amba_driver; -struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives @@ -120,123 +118,6 @@ struct pl08x_lli { u32 cctl; }; -/** - * struct pl08x_bus_data - information of source or destination - * busses for a transfer - * @addr: current address - * @maxwidth: the maximum width of a transfer on this bus - * @buswidth: the width of this bus in bytes: 1, 2 or 4 - */ -struct pl08x_bus_data { - dma_addr_t addr; - u8 maxwidth; - u8 buswidth; -}; - -/** - * struct pl08x_phy_chan - holder for the physical channels - * @id: physical index to this channel - * @lock: a lock to use when altering an instance of this struct - * @serving: the virtual channel currently being served by this physical - * channel - * @locked: channel unavailable for the system, e.g. dedicated to secure - * world - */ -struct pl08x_phy_chan { - unsigned int id; - void __iomem *base; - spinlock_t lock; - struct pl08x_dma_chan *serving; - bool locked; -}; - -/** - * struct pl08x_sg - structure containing data per sg - * @src_addr: src address of sg - * @dst_addr: dst address of sg - * @len: transfer len in bytes - * @node: node for txd's dsg_list - */ -struct pl08x_sg { - dma_addr_t src_addr; - dma_addr_t dst_addr; - size_t len; - struct list_head node; -}; - -/** - * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor - * @vd: virtual DMA descriptor - * @dsg_list: list of children sg's - * @llis_bus: DMA memory address (physical) start for the LLIs - * @llis_va: virtual memory address start for the LLIs - * @cctl: control reg values for current txd - * @ccfg: config reg values for current txd - * @done: this marks completed descriptors, which should not have their - * mux released. 
- */ -struct pl08x_txd { - struct virt_dma_desc vd; - struct list_head dsg_list; - dma_addr_t llis_bus; - struct pl08x_lli *llis_va; - /* Default cctl value for LLIs */ - u32 cctl; - /* - * Settings to be put into the physical channel when we - * trigger this txd. Other registers are in llis_va[0]. - */ - u32 ccfg; - bool done; -}; - -/** - * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel - * states - * @PL08X_CHAN_IDLE: the channel is idle - * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport - * channel and is running a transfer on it - * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport - * channel, but the transfer is currently paused - * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport - * channel to become available (only pertains to memcpy channels) - */ -enum pl08x_dma_chan_state { - PL08X_CHAN_IDLE, - PL08X_CHAN_RUNNING, - PL08X_CHAN_PAUSED, - PL08X_CHAN_WAITING, -}; - -/** - * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel - * @vc: wrappped virtual channel - * @phychan: the physical channel utilized by this channel, if there is one - * @name: name of channel - * @cd: channel platform data - * @runtime_addr: address for RX/TX according to the runtime config - * @at: active transaction on this channel - * @lock: a lock for this channel data - * @host: a pointer to the host (internal use) - * @state: whether the channel is idle, paused, running etc - * @slave: whether this channel is a device (slave) or for memcpy - * @signal: the physical DMA request signal which this channel is using - * @mux_use: count of descriptors using this DMA request signal setting - */ -struct pl08x_dma_chan { - struct virt_dma_chan vc; - struct pl08x_phy_chan *phychan; - const char *name; - const struct pl08x_channel_data *cd; - struct dma_slave_config cfg; - struct pl08x_txd *at; - struct pl08x_driver_data *host; - enum pl08x_dma_chan_state state; - bool slave; - int signal; - unsigned mux_use; -}; - /** * struct pl08x_driver_data - the local state holder for the PL08x * @slave: slave engine for this instance @@ -247,6 +128,7 @@ struct pl08x_dma_chan { * @pd: platform data passed in from the platform/machine * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors + * @pool_ctr: counter of LLIs in the pool * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. @@ -261,8 +143,10 @@ struct pl08x_driver_data { struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; + int pool_ctr; u8 lli_buses; u8 mem_buses; + spinlock_t lock; }; /* @@ -278,51 +162,12 @@ struct pl08x_driver_data { static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) { - return container_of(chan, struct pl08x_dma_chan, vc.chan); + return container_of(chan, struct pl08x_dma_chan, chan); } static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) { - return container_of(tx, struct pl08x_txd, vd.tx); -} - -/* - * Mux handling. - * - * This gives us the DMA request input to the PL08x primecell which the - * peripheral described by the channel data will be routed to, possibly - * via a board/SoC specific external MUX. One important point to note - * here is that this does not depend on the physical channel. 
- */ -static int pl08x_request_mux(struct pl08x_dma_chan *plchan) -{ - const struct pl08x_platform_data *pd = plchan->host->pd; - int ret; - - if (plchan->mux_use++ == 0 && pd->get_signal) { - ret = pd->get_signal(plchan->cd); - if (ret < 0) { - plchan->mux_use = 0; - return ret; - } - - plchan->signal = ret; - } - return 0; -} - -static void pl08x_release_mux(struct pl08x_dma_chan *plchan) -{ - const struct pl08x_platform_data *pd = plchan->host->pd; - - if (plchan->signal >= 0) { - WARN_ON(plchan->mux_use == 0); - - if (--plchan->mux_use == 0 && pd->put_signal) { - pd->put_signal(plchan->cd, plchan->signal); - plchan->signal = -1; - } - } + return container_of(tx, struct pl08x_txd, tx); } /* @@ -344,25 +189,20 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) * been set when the LLIs were constructed. Poke them into the hardware * and start the transfer. */ -static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) +static void pl08x_start_txd(struct pl08x_dma_chan *plchan, + struct pl08x_txd *txd) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; - struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); - struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); - struct pl08x_lli *lli; + struct pl08x_lli *lli = &txd->llis_va[0]; u32 val; - list_del(&txd->vd.node); - plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); - lli = &txd->llis_va[0]; - dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", @@ -471,8 +311,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; + unsigned long flags; size_t bytes = 0; + spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; @@ -512,6 +354,18 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) } } + /* Sum up all queued transactions */ + if (!list_empty(&plchan->pend_list)) { + struct pl08x_txd *txdi; + list_for_each_entry(txdi, &plchan->pend_list, node) { + struct pl08x_sg *dsg; + list_for_each_entry(dsg, &txd->dsg_list, node) + bytes += dsg->len; + } + } + + spin_unlock_irqrestore(&plchan->lock, flags); + return bytes; } @@ -537,6 +391,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, if (!ch->locked && !ch->serving) { ch->serving = virt_chan; + ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); break; } @@ -549,114 +404,25 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } + pm_runtime_get_sync(&pl08x->adev->dev); return ch; } -/* Mark the physical channel as free. Note, this write is atomic. */ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - ch->serving = NULL; -} - -/* - * Try to allocate a physical channel. When successful, assign it to - * this virtual channel, and initiate the next descriptor. The - * virtual channel lock must be held at this point. 
- */ -static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) -{ - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_phy_chan *ch; - - ch = pl08x_get_phy_channel(pl08x, plchan); - if (!ch) { - dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); - plchan->state = PL08X_CHAN_WAITING; - return; - } - - dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", - ch->id, plchan->name); - - plchan->phychan = ch; - plchan->state = PL08X_CHAN_RUNNING; - pl08x_start_next_txd(plchan); -} - -static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, - struct pl08x_dma_chan *plchan) -{ - struct pl08x_driver_data *pl08x = plchan->host; - - dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", - ch->id, plchan->name); - - /* - * We do this without taking the lock; we're really only concerned - * about whether this pointer is NULL or not, and we're guaranteed - * that this will only be called when it _already_ is non-NULL. - */ - ch->serving = plchan; - plchan->phychan = ch; - plchan->state = PL08X_CHAN_RUNNING; - pl08x_start_next_txd(plchan); -} - -/* - * Free a physical DMA channel, potentially reallocating it to another - * virtual channel if we have any pending. - */ -static void pl08x_phy_free(struct pl08x_dma_chan *plchan) -{ - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_dma_chan *p, *next; - - retry: - next = NULL; - - /* Find a waiting virtual channel for the next transfer. */ - list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) - if (p->state == PL08X_CHAN_WAITING) { - next = p; - break; - } + unsigned long flags; - if (!next) { - list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) - if (p->state == PL08X_CHAN_WAITING) { - next = p; - break; - } - } + spin_lock_irqsave(&ch->lock, flags); - /* Ensure that the physical channel is stopped */ - pl08x_terminate_phy_chan(pl08x, plchan->phychan); + /* Stop the channel and clear its interrupts */ + pl08x_terminate_phy_chan(pl08x, ch); - if (next) { - bool success; + pm_runtime_put(&pl08x->adev->dev); - /* - * Eww. We know this isn't going to deadlock - * but lockdep probably doesn't. - */ - spin_lock(&next->vc.lock); - /* Re-check the state now that we have the lock */ - success = next->state == PL08X_CHAN_WAITING; - if (success) - pl08x_phy_reassign_start(plchan->phychan, next); - spin_unlock(&next->vc.lock); - - /* If the state changed, try to find another channel */ - if (!success) - goto retry; - } else { - /* No more jobs, so free up the physical channel */ - pl08x_put_phy_channel(pl08x, plchan->phychan); - } - - plchan->phychan = NULL; - plchan->state = PL08X_CHAN_IDLE; + /* Mark it as free */ + ch->serving = NULL; + spin_unlock_irqrestore(&ch->lock, flags); } /* @@ -819,6 +585,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } + pl08x->pool_ctr++; + bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; @@ -1034,14 +802,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return num_llis; } +/* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; + /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); + pl08x->pool_ctr--; + list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); @@ -1050,75 +822,133 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) +static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, + struct pl08x_dma_chan *plchan) { - struct device *dev = txd->vd.tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); + struct pl08x_txd *txdi = NULL; + struct pl08x_txd *next; + + if (!list_empty(&plchan->pend_list)) { + list_for_each_entry_safe(txdi, + next, &plchan->pend_list, node) { + list_del(&txdi->node); + pl08x_free_txd(pl08x, txdi); } } - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } } -static void pl08x_desc_free(struct virt_dma_desc *vd) +/* + * The DMA ENGINE API + */ +static int pl08x_alloc_chan_resources(struct dma_chan *chan) { - struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); - struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); - - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - if (!txd->done) - pl08x_release_mux(plchan); + return 0; +} - pl08x_free_txd(plchan->host, txd); +static void pl08x_free_chan_resources(struct dma_chan *chan) +{ } -static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, - struct pl08x_dma_chan *plchan) +/* + * This should be called with the channel plchan->lock held + */ +static int prep_phy_channel(struct pl08x_dma_chan *plchan, + struct pl08x_txd *txd) { - LIST_HEAD(head); - struct pl08x_txd *txd; + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_phy_chan *ch; + int ret; - vchan_get_all_descriptors(&plchan->vc, &head); + /* Check if we already have a channel */ + if (plchan->phychan) { + ch = plchan->phychan; + goto got_channel; + } - while (!list_empty(&head)) { - txd = list_first_entry(&head, struct pl08x_txd, vd.node); - list_del(&txd->vd.node); - pl08x_desc_free(&txd->vd); + ch = pl08x_get_phy_channel(pl08x, plchan); + if (!ch) { + /* No physical channel available, cope with it */ + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); + return -EBUSY; } + + /* + * OK we have a physical channel: for memcpy() this is all we + * need, but for slaves the physical signals may be muxed! + * Can the platform allow us to use this channel? 
+ */ + if (plchan->slave && pl08x->pd->get_signal) { + ret = pl08x->pd->get_signal(plchan); + if (ret < 0) { + dev_dbg(&pl08x->adev->dev, + "unable to use physical channel %d for transfer on %s due to platform restrictions\n", + ch->id, plchan->name); + /* Release physical channel & return */ + pl08x_put_phy_channel(pl08x, ch); + return -EBUSY; + } + ch->signal = ret; + } + + plchan->phychan = ch; + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", + ch->id, + ch->signal, + plchan->name); + +got_channel: + /* Assign the flow control signal to this channel */ + if (txd->direction == DMA_MEM_TO_DEV) + txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; + else if (txd->direction == DMA_DEV_TO_MEM) + txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; + + plchan->phychan_hold++; + + return 0; } -/* - * The DMA ENGINE API - */ -static int pl08x_alloc_chan_resources(struct dma_chan *chan) +static void release_phy_channel(struct pl08x_dma_chan *plchan) { - return 0; + struct pl08x_driver_data *pl08x = plchan->host; + + if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { + pl08x->pd->put_signal(plchan); + plchan->phychan->signal = -1; + } + pl08x_put_phy_channel(pl08x, plchan->phychan); + plchan->phychan = NULL; } -static void pl08x_free_chan_resources(struct dma_chan *chan) +static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) { - /* Ensure all queued descriptors are freed */ - vchan_free_chan_resources(to_virt_chan(chan)); + struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); + struct pl08x_txd *txd = to_pl08x_txd(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&plchan->lock, flags); + cookie = dma_cookie_assign(tx); + + /* Put this onto the pending list */ + list_add_tail(&txd->node, &plchan->pend_list); + + /* + * If there was no physical channel available for this memcpy, + * stack the request up and indicate that the channel is waiting + * for a free physical channel. + */ + if (!plchan->slave && !plchan->phychan) { + /* Do this memcpy whenever there is a channel ready */ + plchan->state = PL08X_CHAN_WAITING; + plchan->waiting = txd; + } else { + plchan->phychan_hold--; + } + + spin_unlock_irqrestore(&plchan->lock, flags); + + return cookie; } static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( @@ -1138,53 +968,23 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct virt_dma_desc *vd; - unsigned long flags; enum dma_status ret; - size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) return ret; - /* - * There's no point calculating the residue if there's - * no txstate to store the value. 
- */ - if (!txstate) { - if (plchan->state == PL08X_CHAN_PAUSED) - ret = DMA_PAUSED; - return ret; - } - - spin_lock_irqsave(&plchan->vc.lock, flags); - ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { - vd = vchan_find_desc(&plchan->vc, cookie); - if (vd) { - /* On the issued list, so hasn't been processed yet */ - struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); - struct pl08x_sg *dsg; - - list_for_each_entry(dsg, &txd->dsg_list, node) - bytes += dsg->len; - } else { - bytes = pl08x_getbytes_chan(plchan); - } - } - spin_unlock_irqrestore(&plchan->vc.lock, flags); - /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ - dma_set_residue(txstate, bytes); + dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); - if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) - ret = DMA_PAUSED; + if (plchan->state == PL08X_CHAN_PAUSED) + return DMA_PAUSED; /* Whether waiting or running, we're in progress */ - return ret; + return DMA_IN_PROGRESS; } /* PrimeCell DMA extension */ @@ -1280,14 +1080,38 @@ static u32 pl08x_burst(u32 maxburst) return burst_sizes[i].reg; } -static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, - enum dma_slave_buswidth addr_width, u32 maxburst) +static int dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) { - u32 width, burst, cctl = 0; + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; + enum dma_slave_buswidth addr_width; + u32 width, burst, maxburst; + u32 cctl = 0; + + if (!plchan->slave) + return -EINVAL; + + /* Transfer direction */ + plchan->runtime_direction = config->direction; + if (config->direction == DMA_MEM_TO_DEV) { + addr_width = config->dst_addr_width; + maxburst = config->dst_maxburst; + } else if (config->direction == DMA_DEV_TO_MEM) { + addr_width = config->src_addr_width; + maxburst = config->src_maxburst; + } else { + dev_err(&pl08x->adev->dev, + "bad runtime_config: alien transfer direction\n"); + return -EINVAL; + } width = pl08x_width(addr_width); - if (width == ~0) - return ~0; + if (width == ~0) { + dev_err(&pl08x->adev->dev, + "bad runtime_config: alien address width\n"); + return -EINVAL; + } cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; @@ -1304,23 +1128,28 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - return pl08x_cctl(cctl); -} - -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) -{ - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - - if (!plchan->slave) - return -EINVAL; + plchan->device_fc = config->device_fc; - /* Reject definitely invalid configurations */ - if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || - config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) - return -EINVAL; + if (plchan->runtime_direction == DMA_DEV_TO_MEM) { + plchan->src_addr = config->src_addr; + plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | + pl08x_select_bus(plchan->cd->periph_buses, + pl08x->mem_buses); + } else { + plchan->dst_addr = config->dst_addr; + plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | + pl08x_select_bus(pl08x->mem_buses, + plchan->cd->periph_buses); + } - plchan->cfg = *config; + dev_dbg(&pl08x->adev->dev, + "configured channel %s (%s) for %s, data width %d, " + "maxburst %d words, LE, CCTL=0x%08x\n", + dma_chan_name(chan), 
plchan->name, + (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", + addr_width, + maxburst, + cctl); return 0; } @@ -1334,19 +1163,95 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->vc.lock, flags); - if (vchan_issue_pending(&plchan->vc)) { - if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) - pl08x_phy_alloc_and_start(plchan); + spin_lock_irqsave(&plchan->lock, flags); + /* Something is already active, or we're waiting for a channel... */ + if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { + spin_unlock_irqrestore(&plchan->lock, flags); + return; + } + + /* Take the first element in the queue and execute it */ + if (!list_empty(&plchan->pend_list)) { + struct pl08x_txd *next; + + next = list_first_entry(&plchan->pend_list, + struct pl08x_txd, + node); + list_del(&next->node); + plchan->state = PL08X_CHAN_RUNNING; + + pl08x_start_txd(plchan, next); } - spin_unlock_irqrestore(&plchan->vc.lock, flags); + + spin_unlock_irqrestore(&plchan->lock, flags); +} + +static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, + struct pl08x_txd *txd) +{ + struct pl08x_driver_data *pl08x = plchan->host; + unsigned long flags; + int num_llis, ret; + + num_llis = pl08x_fill_llis_for_desc(pl08x, txd); + if (!num_llis) { + spin_lock_irqsave(&plchan->lock, flags); + pl08x_free_txd(pl08x, txd); + spin_unlock_irqrestore(&plchan->lock, flags); + return -EINVAL; + } + + spin_lock_irqsave(&plchan->lock, flags); + + /* + * See if we already have a physical channel allocated, + * else this is the time to try to get one. + */ + ret = prep_phy_channel(plchan, txd); + if (ret) { + /* + * No physical channel was available. + * + * memcpy transfers can be sorted out at submission time. + * + * Slave transfers may have been denied due to platform + * channel muxing restrictions. Since there is no guarantee + * that this will ever be resolved, and the signal must be + * acquired AFTER acquiring the physical channel, we will let + * them be NACK:ed with -EBUSY here. The drivers can retry + * the prep() call if they are eager on doing this using DMA. + */ + if (plchan->slave) { + pl08x_free_txd_list(pl08x, plchan); + pl08x_free_txd(pl08x, txd); + spin_unlock_irqrestore(&plchan->lock, flags); + return -EBUSY; + } + } else + /* + * Else we're all set, paused and ready to roll, status + * will switch to PL08X_CHAN_RUNNING when we call + * issue_pending(). If there is something running on the + * channel already we don't change its state. 
+ */ + if (plchan->state == PL08X_CHAN_IDLE) + plchan->state = PL08X_CHAN_PAUSED; + + spin_unlock_irqrestore(&plchan->lock, flags); + + return 0; } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, + unsigned long flags) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { + dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); + txd->tx.flags = flags; + txd->tx.tx_submit = pl08x_tx_submit; + INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1369,7 +1274,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan); + txd = pl08x_get_txd(plchan, flags); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1385,13 +1290,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); + txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & + txd->cctl = pl08x->pd->memcpy_channel.cctl & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1401,13 +1307,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_fill_llis_for_desc(plchan->host, txd); - if (!ret) { - pl08x_free_txd(pl08x, txd); + ret = pl08x_prep_channel_resources(plchan, txd); + if (ret) return NULL; - } - return vchan_tx_prep(&plchan->vc, &txd->vd, flags); + return &txd->tx; } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1420,40 +1324,36 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; - enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; - u8 src_buses, dst_buses; - u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan); + txd = pl08x_get_txd(plchan, flags); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } + if (direction != plchan->runtime_direction) + dev_err(&pl08x->adev->dev, "%s DMA setup does not match " + "the direction configured for the PrimeCell\n", + __func__); + /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
*/ + txd->direction = direction; + if (direction == DMA_MEM_TO_DEV) { - cctl = PL080_CONTROL_SRC_INCR; - slave_addr = plchan->cfg.dst_addr; - addr_width = plchan->cfg.dst_addr_width; - maxburst = plchan->cfg.dst_maxburst; - src_buses = pl08x->mem_buses; - dst_buses = plchan->cd->periph_buses; + txd->cctl = plchan->dst_cctl; + slave_addr = plchan->dst_addr; } else if (direction == DMA_DEV_TO_MEM) { - cctl = PL080_CONTROL_DST_INCR; - slave_addr = plchan->cfg.src_addr; - addr_width = plchan->cfg.src_addr_width; - maxburst = plchan->cfg.src_maxburst; - src_buses = plchan->cd->periph_buses; - dst_buses = pl08x->mem_buses; + txd->cctl = plchan->src_cctl; + slave_addr = plchan->src_addr; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1461,17 +1361,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); - if (cctl == ~0) { - pl08x_free_txd(pl08x, txd); - dev_err(&pl08x->adev->dev, - "DMA slave configuration botched?\n"); - return NULL; - } - - txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); - - if (plchan->cfg.device_fc) + if (plchan->device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1480,28 +1370,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; - ret = pl08x_request_mux(plchan); - if (ret < 0) { - pl08x_free_txd(pl08x, txd); - dev_dbg(&pl08x->adev->dev, - "unable to mux for transfer on %s due to platform restrictions\n", - plchan->name); - return NULL; - } - - dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", - plchan->signal, plchan->name); - - /* Assign the flow control signal to this channel */ - if (direction == DMA_MEM_TO_DEV) - txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; - else - txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; - for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { - pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1519,14 +1390,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_fill_llis_for_desc(plchan->host, txd); - if (!ret) { - pl08x_release_mux(plchan); - pl08x_free_txd(pl08x, txd); + ret = pl08x_prep_channel_resources(plchan, txd); + if (ret) return NULL; - } - return vchan_tx_prep(&plchan->vc, &txd->vd, flags); + return &txd->tx; } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1547,9 +1415,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->vc.lock, flags); + spin_lock_irqsave(&plchan->lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->vc.lock, flags); + spin_unlock_irqrestore(&plchan->lock, flags); return 0; } @@ -1558,15 +1426,18 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { + pl08x_terminate_phy_chan(pl08x, plchan->phychan); + /* * Mark physical channel as free and free any slave * signal */ - pl08x_phy_free(plchan); + release_phy_channel(plchan); + plchan->phychan_hold = 0; } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_desc_free(&plchan->at->vd); + pl08x_free_txd(pl08x, plchan->at); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1586,7 +1457,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->vc.lock, flags); + spin_unlock_irqrestore(&plchan->lock, flags); return ret; } @@ -1623,6 +1494,123 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } +static void pl08x_unmap_buffers(struct pl08x_txd *txd) +{ + struct device *dev = txd->tx.chan->device->dev; + struct pl08x_sg *dsg; + + if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + else { + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + } + } + if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + else + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + } +} + +static void pl08x_tasklet(unsigned long data) +{ + struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_txd *txd; + unsigned long flags; + + spin_lock_irqsave(&plchan->lock, flags); + + txd = plchan->at; + plchan->at = NULL; + + if (txd) { + /* Update last completed */ + dma_cookie_complete(&txd->tx); + } + + /* If a new descriptor is queued, set it up plchan->at is NULL here */ + if (!list_empty(&plchan->pend_list)) { + struct pl08x_txd *next; + + next = list_first_entry(&plchan->pend_list, + struct pl08x_txd, + node); + list_del(&next->node); + + pl08x_start_txd(plchan, next); + } else if (plchan->phychan_hold) { + /* + * This channel is still in use - we have a new txd being + * prepared and will soon be queued. Don't give up the + * physical channel. + */ + } else { + struct pl08x_dma_chan *waiting = NULL; + + /* + * No more jobs, so free up the physical channel + * Free any allocated signal on slave transfers too + */ + release_phy_channel(plchan); + plchan->state = PL08X_CHAN_IDLE; + + /* + * And NOW before anyone else can grab that free:d up + * physical channel, see if there is some memcpy pending + * that seriously needs to start because of being stacked + * up while we were choking the physical channels with data. 
+ */ + list_for_each_entry(waiting, &pl08x->memcpy.channels, + chan.device_node) { + if (waiting->state == PL08X_CHAN_WAITING && + waiting->waiting != NULL) { + int ret; + + /* This should REALLY not fail now */ + ret = prep_phy_channel(waiting, + waiting->waiting); + BUG_ON(ret); + waiting->phychan_hold--; + waiting->state = PL08X_CHAN_RUNNING; + waiting->waiting = NULL; + pl08x_issue_pending(&waiting->chan); + break; + } + } + } + + spin_unlock_irqrestore(&plchan->lock, flags); + + if (txd) { + dma_async_tx_callback callback = txd->tx.callback; + void *callback_param = txd->tx.callback_param; + + /* Don't try to unmap buffers on slave channels */ + if (!plchan->slave) + pl08x_unmap_buffers(txd); + + /* Free the descriptor */ + spin_lock_irqsave(&plchan->lock, flags); + pl08x_free_txd(pl08x, txd); + spin_unlock_irqrestore(&plchan->lock, flags); + + /* Callback to signal completion */ + if (callback) + callback(callback_param); + } +} + static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1647,7 +1635,6 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; - struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1656,29 +1643,8 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - spin_lock(&plchan->vc.lock); - tx = plchan->at; - if (tx) { - plchan->at = NULL; - /* - * This descriptor is done, release its mux - * reservation. - */ - pl08x_release_mux(plchan); - tx->done = true; - vchan_cookie_complete(&tx->vd); - - /* - * And start the next descriptor (if any), - * otherwise free this channel. - */ - if (vchan_next_desc(&plchan->vc)) - pl08x_start_next_txd(plchan); - else - pl08x_phy_free(plchan); - } - spin_unlock(&plchan->vc.lock); - + /* Schedule tasklet on this channel */ + tasklet_schedule(&plchan->tasklet); mask |= (1 << i); } } @@ -1688,10 +1654,16 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { + u32 cctl = pl08x_cctl(chan->cd->cctl); + chan->slave = true; chan->name = chan->cd->bus_id; - chan->cfg.src_addr = chan->cd->addr; - chan->cfg.dst_addr = chan->cd->addr; + chan->src_addr = chan->cd->addr; + chan->dst_addr = chan->cd->addr; + chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | + pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); + chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | + pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); } /* @@ -1721,7 +1693,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; - chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1734,12 +1705,26 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } + if (chan->cd->circular_buffer) { + dev_err(&pl08x->adev->dev, + "channel %s: circular buffers not supported\n", + chan->name); + kfree(chan); + continue; + } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->vc.desc_free = pl08x_desc_free; - vchan_init(&chan->vc, dmadev); + chan->chan.device = dmadev; + dma_cookie_init(&chan->chan); + + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->pend_list); + tasklet_init(&chan->tasklet, pl08x_tasklet, + (unsigned long) chan); + + list_add_tail(&chan->chan.device_node, &dmadev->channels); } dev_info(&pl08x->adev->dev, "initialized %d 
virtual %s channels\n", i, slave ? "slave" : "memcpy"); @@ -1752,8 +1737,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, vc.chan.device_node) { - list_del(&chan->vc.chan.device_node); + next, &dmadev->channels, chan.device_node) { + list_del(&chan->chan.device_node); kfree(chan); } } @@ -1806,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1814,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1866,6 +1851,9 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } + pm_runtime_set_active(&adev->dev); + pm_runtime_enable(&adev->dev); + /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1915,6 +1903,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } + spin_lock_init(&pl08x->lock); + pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1952,6 +1942,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); + ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2016,6 +2007,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); + pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2034,6 +2026,9 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: + pm_runtime_put(&adev->dev); + pm_runtime_disable(&adev->dev); + kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/trunk/drivers/dma/sa11x0-dma.c b/trunk/drivers/dma/sa11x0-dma.c index 5f1d2e670837..db4fcbd80c7d 100644 --- a/trunk/drivers/dma/sa11x0-dma.c +++ b/trunk/drivers/dma/sa11x0-dma.c @@ -416,27 +416,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; ret = dma_cookie_status(&c->vc.chan, cookie, state); if (ret == DMA_SUCCESS) return ret; + if (!state) + return c->status; + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. 
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
+	} else if (!p) {
+		state->residue = 0;
+	} else {
+		struct sa11x0_dma_desc *txd;
+		size_t bytes = 0;
+
+		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
+			txd = p->txd_done;
+		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
+			txd = p->txd_load;
+		else
+			txd = NULL;
 
-		txd = p->txd_done;
+		ret = c->status;
 		if (txd) {
+			dma_addr_t addr = sa11x0_dma_pos(p);
 			unsigned i;
 
+			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
 			for (i = 0; i < txd->sglen; i++) {
 				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
 					i, txd->sg[i].addr, txd->sg[i].len);
@@ -459,18 +479,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 					bytes += txd->sg[i].len;
 			}
 		}
-		if (txd != p->txd_load && p->txd_load)
-			bytes += p->txd_load->size;
-	}
-	list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
-		bytes += txd->size;
+		state->residue = bytes;
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
-	if (state)
-		state->residue = bytes;
-
-	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
 
 	return ret;
 }
diff --git a/trunk/include/linux/amba/pl08x.h b/trunk/include/linux/amba/pl08x.h
index 2a5f64a11b77..02549017212a 100644
--- a/trunk/include/linux/amba/pl08x.h
+++ b/trunk/include/linux/amba/pl08x.h
@@ -21,9 +21,8 @@
 #include 
 #include 
 
+struct pl08x_lli;
 struct pl08x_driver_data;
-struct pl08x_phy_chan;
-struct pl08x_txd;
 
 /* Bitmasks for selecting AHB ports for DMA transfers */
 enum {
@@ -47,28 +46,169 @@ enum {
  * devices with static assignments
  * @muxval: a number usually used to poke into some mux regiser to
  * mux in the signal to this channel
- * @cctl_memcpy: options for the channel control register for memcpy
- *  *** not used for slave channels ***
+ * @cctl_opt: default options for the channel control register
  * @addr: source/target address in physical memory for this DMA channel,
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
  * this.
+ * @circular_buffer: whether the buffer passed in is circular and
+ * shall simply be looped round round (like a record baby round
+ * round round round)
  * @single: the device connected to this channel will request single DMA
  * transfers, not bursts. (Bursts are default.)
  * @periph_buses: the device connected to this channel is accessible via
  * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
-	const char *bus_id;
+	char *bus_id;
 	int min_signal;
 	int max_signal;
 	u32 muxval;
-	u32 cctl_memcpy;
+	u32 cctl;
 	dma_addr_t addr;
+	bool circular_buffer;
 	bool single;
 	u8 periph_buses;
 };
 
+/**
+ * Struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	int signal;
+	struct pl08x_dma_chan *serving;
+	bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @dsg_list: list of children sg's
+ * @direction: direction of transfer
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ */
+struct pl08x_txd {
+	struct dma_async_tx_descriptor tx;
+	struct list_head node;
+	struct list_head dsg_list;
+	enum dma_transfer_direction direction;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd.  Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @chan: wrappped abstract channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
+ * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @runtime_direction: current direction of this channel according to
+ * runtime config
+ * @pend_list: queued transactions pending on this channel
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
+ */
+struct pl08x_dma_chan {
+	struct dma_chan chan;
+	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
+	struct tasklet_struct tasklet;
+	char *name;
+	const struct pl08x_channel_data *cd;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 src_cctl;
+	u32 dst_cctl;
+	enum dma_transfer_direction runtime_direction;
+	struct list_head pend_list;
+	struct pl08x_txd *at;
+	spinlock_t lock;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	bool device_fc;
+	struct pl08x_txd *waiting;
+};
+
 /**
  * struct pl08x_platform_data - the platform configuration for the PL08x
  * PrimeCells.
@@ -89,8 +229,8 @@ struct pl08x_platform_data {
 	const struct pl08x_channel_data *slave_channels;
 	unsigned int num_slave_channels;
 	struct pl08x_channel_data memcpy_channel;
-	int (*get_signal)(const struct pl08x_channel_data *);
-	void (*put_signal)(const struct pl08x_channel_data *, int);
+	int (*get_signal)(struct pl08x_dma_chan *);
+	void (*put_signal)(struct pl08x_dma_chan *);
 	u8 lli_buses;
 	u8 mem_buses;
 };
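
The final pl08x.h hunk changes get_signal/put_signal to take the virtual channel
(struct pl08x_dma_chan *) rather than the per-channel platform data, so a board
file reaches its channel data through ch->cd. A minimal board-side sketch is
given below; it is not part of the patch, the names board_get_signal,
board_put_signal, board_slave_channels and board_pl08x_pd are hypothetical, and
the request-mux handling is only a placeholder comment:

#include <linux/kernel.h>
#include <linux/amba/pl08x.h>

/* Hypothetical example data: one slave channel wired to DMA request line 2 */
static struct pl08x_channel_data board_slave_channels[] = {
	{
		.bus_id = "uart0_rx",	/* placeholder peripheral name */
		.min_signal = 2,
		.max_signal = 2,
		.muxval = 0,
		.cctl = 0,
		.periph_buses = PL08X_AHB1,
	},
};

/* With the new prototype the callback receives the virtual channel and can
 * reach its platform data via ch->cd. */
static int board_get_signal(struct pl08x_dma_chan *ch)
{
	const struct pl08x_channel_data *cd = ch->cd;

	/* A real board would program its DMA request mux from cd->muxval here */
	return cd->min_signal;	/* request signal the driver should use */
}

static void board_put_signal(struct pl08x_dma_chan *ch)
{
	/* Undo any muxing done in board_get_signal(); nothing needed here */
}

static struct pl08x_platform_data board_pl08x_pd = {
	.slave_channels		= board_slave_channels,
	.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
	.get_signal		= board_get_signal,
	.put_signal		= board_put_signal,
	.lli_buses		= PL08X_AHB1,
	.mem_buses		= PL08X_AHB1,
};

The structure would then be hooked up as the PL08x AMBA device's platform data
in the usual way for the board in question.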
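
On the sa11x0 side, the reworked sa11x0_dma_tx_status() reports a per-cookie
residue: vchan_find_desc() covers descriptors still sitting on the issue queue,
and the remaining scatterlist bytes of the active descriptor are summed only
when the cookie matches txd_done or txd_load. A client consumes this through
the generic dmaengine helpers; the following sketch assumes a transfer already
submitted on the channel, and the helper name bytes_pending is hypothetical:

#include <linux/dmaengine.h>

/* How many bytes of the transfer identified by "cookie" are still outstanding */
static size_t bytes_pending(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_SUCCESS)
		return 0;	/* fully completed, nothing left */

	/* Filled in by the driver's tx_status hook: the whole descriptor size
	 * if it is still queued, otherwise the bytes not yet transferred by
	 * the active descriptor. */
	return state.residue;
}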