
Commit 8794f0b
---
r: 199720
b: refs/heads/master
c: ceadda0
h: refs/heads/master
v: v3
Linus Torvalds committed Jun 3, 2010
1 parent 746b903 commit 8794f0b
Showing 78 changed files with 3,678 additions and 447 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fbae3fb1546e199ab0cd185348f8124411a1ca9d
+refs/heads/master: ceadda057c000fa82e6bbe508923d8181414dea7
12 changes: 6 additions & 6 deletions trunk/Documentation/DocBook/drm.tmpl
@@ -389,7 +389,7 @@
       </para>
       <para>
         If your driver supports memory management (it should!), you'll
-        need to set that up at load time as well.  How you intialize
+        need to set that up at load time as well.  How you initialize
         it depends on which memory manager you're using, TTM or GEM.
       </para>
       <sect3>
@@ -399,7 +399,7 @@
         aperture space for graphics devices.  TTM supports both UMA devices
         and devices with dedicated video RAM (VRAM), i.e. most discrete
         graphics devices.  If your device has dedicated RAM, supporting
-        TTM is desireable.  TTM also integrates tightly with your
+        TTM is desirable.  TTM also integrates tightly with your
         driver specific buffer execution function.  See the radeon
         driver for examples.
       </para>
@@ -443,7 +443,7 @@
         likely eventually calling ttm_bo_global_init and
         ttm_bo_global_release, respectively.  Also like the previous
         object, ttm_global_item_ref is used to create an initial reference
-        count for the TTM, which will call your initalization function.
+        count for the TTM, which will call your initialization function.
       </para>
       </sect3>
       <sect3>
@@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev)
        CRT connector and encoder combination is created.  A device
        specific i2c bus is also created, for fetching EDID data and
        performing monitor detection.  Once the process is complete,
-       the new connector is regsitered with sysfs, to make its
+       the new connector is registered with sysfs, to make its
        properties available to applications.
      </para>
      <sect4>
@@ -581,12 +581,12 @@ void intel_crt_init(struct drm_device *dev)
      <para>
        For each encoder, CRTC and connector, several functions must
        be provided, depending on the object type.  Encoder objects
-       need should provide a DPMS (basically on/off) function, mode fixup
+       need to provide a DPMS (basically on/off) function, mode fixup
        (for converting requested modes into native hardware timings),
        and prepare, set and commit functions for use by the core DRM
        helper functions.  Connector helpers need to provide mode fetch and
        validity functions as well as an encoder matching function for
-       returing an ideal encoder for a given connector.  The core
+       returning an ideal encoder for a given connector.  The core
        connector functions include a DPMS callback, (deprecated)
        save/restore routines, detection, mode probing, property handling,
        and cleanup functions.
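The encoder helper contract described in this documentation hunk is easiest to see in code. Below is a minimal sketch, assuming the 2010-era KMS helper API and reusing the intel_crt naming from the i915 driver that the text cites; treat the exact callback names as illustrative rather than authoritative.

/* Wiring up the encoder helpers the text describes (sketch). */
static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
        .dpms = intel_crt_dpms,               /* basically on/off */
        .mode_fixup = intel_crt_mode_fixup,   /* requested -> native timings */
        .prepare = intel_encoder_prepare,     /* quiesce before the mode set */
        .mode_set = intel_crt_mode_set,       /* program the hardware */
        .commit = intel_encoder_commit,       /* re-enable afterwards */
};

/* ...then, inside intel_crt_init(), after drm_encoder_init(): */
drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);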
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/sysdev/fsl_msi.c
@@ -347,7 +347,7 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
                goto error_out;
        }
        offset = 0;
-       p = of_get_property(dev->node, "msi-available-ranges", &len);
+       p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
        if (p)
                offset = *p / IRQS_PER_MSI_REG;
 
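Most hunks in this commit are the same mechanical conversion: struct of_device used to carry its device-tree node directly as ofdev->node, and the OF/driver-core unification moved it into the embedded struct device as ofdev->dev.of_node. A minimal sketch of the pattern follows; example_of_probe() and "example-property" are hypothetical names for illustration.

/* Sketch of the ofdev->node -> ofdev->dev.of_node conversion. */
static int example_of_probe(struct of_device *ofdev,
                            const struct of_device_id *match)
{
        struct device_node *np = ofdev->dev.of_node;  /* was: ofdev->node */
        const u32 *prop;
        int len;

        prop = of_get_property(np, "example-property", &len);
        if (!prop)
                return -ENODEV;

        return 0;
}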
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/sysdev/fsl_rio.c
@@ -1426,7 +1426,7 @@ int fsl_rio_setup(struct of_device *dev)
        port->iores.flags = IORESOURCE_MEM;
        port->iores.name = "rio_io_win";
 
-       priv->pwirq = irq_of_parse_and_map(dev->node, 0);
+       priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
        priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
        priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
        priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
6 changes: 3 additions & 3 deletions trunk/drivers/crypto/amcc/crypto4xx_core.c
@@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
        struct device *dev = &ofdev->dev;
        struct crypto4xx_core_device *core_dev;
 
-       rc = of_address_to_resource(ofdev->node, 0, &res);
+       rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
        if (rc)
                return -ENODEV;
 
@@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
                                             (unsigned long) dev);
 
        /* Register for Crypto isr, Crypto Engine IRQ */
-       core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+       core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
                         core_dev->dev->name, dev);
        if (rc)
                goto err_request_irq;
 
-       core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+       core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
        if (!core_dev->dev->ce_base) {
                dev_err(dev, "failed to of_iomap\n");
                goto err_iomap;
123 changes: 65 additions & 58 deletions trunk/drivers/crypto/n2_core.c
@@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx)
 struct n2_hash_ctx {
        struct n2_base_ctx      base;
 
-       struct crypto_ahash     *fallback;
+       struct crypto_ahash     *fallback_tfm;
+};
 
-       /* These next three members must match the layout created by
-        * crypto_init_shash_ops_async.  This allows us to properly
-        * plumb requests we can't do in hardware down to the fallback
-        * operation, providing all of the data structures and layouts
-        * expected by those paths.
-        */
-       struct ahash_request    fallback_req;
-       struct shash_desc       fallback_desc;
+struct n2_hash_req_ctx {
        union {
                struct md5_state        md5;
                struct sha1_state       sha1;
@@ -269,56 +263,62 @@ struct n2_hash_ctx {
 
        unsigned char           hash_key[64];
        unsigned char           keyed_zero_hash[32];
+
+       struct ahash_request    fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       return crypto_ahash_init(&ctx->fallback_req);
+       return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-       ctx->fallback_req.nbytes = req->nbytes;
-       ctx->fallback_req.src = req->src;
+       ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       rctx->fallback_req.nbytes = req->nbytes;
+       rctx->fallback_req.src = req->src;
 
-       return crypto_ahash_update(&ctx->fallback_req);
+       return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-       ctx->fallback_req.result = req->result;
+       ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       rctx->fallback_req.result = req->result;
 
-       return crypto_ahash_final(&ctx->fallback_req);
+       return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-       ctx->fallback_req.nbytes = req->nbytes;
-       ctx->fallback_req.src = req->src;
-       ctx->fallback_req.result = req->result;
+       ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       rctx->fallback_req.nbytes = req->nbytes;
+       rctx->fallback_req.src = req->src;
+       rctx->fallback_req.result = req->result;
 
-       return crypto_ahash_finup(&ctx->fallback_req);
+       return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
@@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
                goto out;
        }
 
-       ctx->fallback = fallback_tfm;
+       crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+                                crypto_ahash_reqsize(fallback_tfm)));
+
+       ctx->fallback_tfm = fallback_tfm;
        return 0;
 
 out:
@@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-       crypto_free_ahash(ctx->fallback);
+       crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
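The n2_hash hunks above and below share one idea: fallback state that is really per-request moves out of the shared tfm context, where concurrent requests on the same tfm would overwrite each other, into the request context that the ahash core allocates alongside every ahash_request. The driver declares the size it needs at init time with crypto_ahash_set_reqsize(), and each handler then recovers its private area with ahash_request_ctx(). A condensed sketch of the pattern, using the names from this diff; both structs are trimmed to the members relevant here, and the fallback allocation is simplified relative to the driver's.

/* Shared, per-tfm state: just the fallback tfm handle (trimmed). */
struct n2_hash_ctx {
        struct crypto_ahash *fallback_tfm;
};

/* Per-request state: the fallback request lives here now (trimmed). */
struct n2_hash_req_ctx {
        struct ahash_request fallback_req;
};

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct crypto_ahash *fallback_tfm;

        /* Simplified: the driver looks up its named software fallback. */
        fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm))
                return PTR_ERR(fallback_tfm);

        /* Room for our request ctx plus whatever the fallback needs. */
        crypto_ahash_set_reqsize(ahash, sizeof(struct n2_hash_req_ctx) +
                                        crypto_ahash_reqsize(fallback_tfm));
        ctx->fallback_tfm = fallback_tfm;
        return 0;
}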
@@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req,
         * exceed 2^16.
         */
        if (unlikely(req->nbytes > (1 << 16))) {
-               ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-               ctx->fallback_req.base.flags =
+               struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+               ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+               rctx->fallback_req.base.flags =
                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-               ctx->fallback_req.nbytes = req->nbytes;
-               ctx->fallback_req.src = req->src;
-               ctx->fallback_req.result = req->result;
+               rctx->fallback_req.nbytes = req->nbytes;
+               rctx->fallback_req.src = req->src;
+               rctx->fallback_req.result = req->result;
 
-               return crypto_ahash_digest(&ctx->fallback_req);
+               return crypto_ahash_digest(&rctx->fallback_req);
        }
 
        n2_base_ctx_init(&ctx->base);
@@ -472,9 +477,8 @@ static int n2_hash_async_digest(struct ahash_request *req,
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       struct md5_state *m = &ctx->u.md5;
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+       struct md5_state *m = &rctx->u.md5;
 
        if (unlikely(req->nbytes == 0)) {
                static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req)
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       struct sha1_state *s = &ctx->u.sha1;
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+       struct sha1_state *s = &rctx->u.sha1;
 
        if (unlikely(req->nbytes == 0)) {
                static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req)
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       struct sha256_state *s = &ctx->u.sha256;
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+       struct sha256_state *s = &rctx->u.sha256;
 
        if (req->nbytes == 0) {
                static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req)
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       struct sha256_state *s = &ctx->u.sha256;
+       struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+       struct sha256_state *s = &rctx->u.sha256;
 
        if (req->nbytes == 0) {
                static const char sha224_zero[SHA224_DIGEST_SIZE] = {
@@ -1398,7 +1399,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 
        intr = ip->ino_table[i].intr;
 
-       dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+       dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
        if (!dev_intrs)
                return -ENODEV;
 
@@ -1449,7 +1450,7 @@ static int queue_cache_init(void)
 {
        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
                queue_cache[HV_NCS_QTYPE_MAU - 1] =
-                       kmem_cache_create("cwq_queue",
+                       kmem_cache_create("mau_queue",
                                          (MAU_NUM_ENTRIES *
                                           MAU_ENTRY_SIZE),
                                          MAU_ENTRY_SIZE, 0, NULL);
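The one-word change above is more than cosmetic: queue_cache_init() was registering the MAU queue cache under the same slab name, "cwq_queue", that the CWQ cache already uses, and slab cache names appear keyed by name in /proc/slabinfo, so duplicates are at best confusing. A sketch of the corrected pairing; the CWQ half and the size constants mirror the driver's and should be treated as assumptions here.

/* Each queue type gets its own, uniquely named cache (sketch). */
queue_cache[HV_NCS_QTYPE_CWQ - 1] =
        kmem_cache_create("cwq_queue",
                          CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE,
                          CWQ_ENTRY_SIZE, 0, NULL);
queue_cache[HV_NCS_QTYPE_MAU - 1] =
        kmem_cache_create("mau_queue",
                          MAU_NUM_ENTRIES * MAU_ENTRY_SIZE,
                          MAU_ENTRY_SIZE, 0, NULL);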
@@ -1574,7 +1575,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
                id = mdesc_get_property(mdesc, tgt, "id", NULL);
                if (table[*id] != NULL) {
                        dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
-                               dev->node->full_name);
+                               dev->dev.of_node->full_name);
                        return -EINVAL;
                }
                cpu_set(*id, p->sharing);
@@ -1595,7 +1596,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
        p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
        if (!p) {
                dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
-                       dev->node->full_name);
+                       dev->dev.of_node->full_name);
                return -ENOMEM;
        }
 
@@ -1684,7 +1685,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
        const unsigned int *reg;
        u64 node;
 
-       reg = of_get_property(dev->node, "reg", NULL);
+       reg = of_get_property(dev->dev.of_node, "reg", NULL);
        if (!reg)
                return -ENODEV;
 
@@ -1836,7 +1837,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev,
 
        n2_spu_driver_version();
 
-       full_name = dev->node->full_name;
+       full_name = dev->dev.of_node->full_name;
        pr_info("Found N2CP at %s\n", full_name);
 
        np = alloc_n2cp();
@@ -1948,7 +1949,7 @@ static int __devinit n2_mau_probe(struct of_device *dev,
 
        n2_spu_driver_version();
 
-       full_name = dev->node->full_name;
+       full_name = dev->dev.of_node->full_name;
        pr_info("Found NCP at %s\n", full_name);
 
        mp = alloc_ncp();
@@ -2034,8 +2035,11 @@ static struct of_device_id n2_crypto_match[] = {
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
 static struct of_platform_driver n2_crypto_driver = {
-       .name           =       "n2cp",
-       .match_table    =       n2_crypto_match,
+       .driver = {
+               .name = "n2cp",
+               .owner = THIS_MODULE,
+               .of_match_table = n2_crypto_match,
+       },
        .probe          =       n2_crypto_probe,
        .remove         =       __devexit_p(n2_crypto_remove),
 };
@@ -2055,8 +2059,11 @@ static struct of_device_id n2_mau_match[] = {
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
 static struct of_platform_driver n2_mau_driver = {
-       .name           =       "ncp",
-       .match_table    =       n2_mau_match,
+       .driver = {
+               .name = "ncp",
+               .owner = THIS_MODULE,
+               .of_match_table = n2_mau_match,
+       },
        .probe          =       n2_mau_probe,
        .remove         =       __devexit_p(n2_mau_remove),
 };
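The two registration hunks above are part of the tree-wide of_platform_driver conversion from this merge window: the flat .name and .match_table fields move into the embedded struct device_driver, which also gains an explicit .owner, and the match table is now reached as .driver.of_match_table. Registration itself is unchanged by the layout move. A sketch of module init/exit for one of the two drivers, assuming the of_register_platform_driver() entry points of this era (the real driver registers both n2cp and ncp):

/* Sketch: registration is untouched by the struct layout change. */
static int __init n2_init(void)
{
        return of_register_platform_driver(&n2_crypto_driver);
}

static void __exit n2_exit(void)
{
        of_unregister_platform_driver(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);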
