sgi-xp: create activate and notify gru message queues
For UV, add the code to create the activate and notify GRU message queues.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Dean Nelson authored and H. Peter Anvin committed Nov 6, 2008
1 parent 6c1c325 commit 2525789
Showing 2 changed files with 218 additions and 53 deletions.
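
For readers unfamiliar with the error handling that the new xpc_create_gru_mq_uv() in xpc_uv.c adopts below, here is a minimal, self-contained user-space sketch (not part of this commit) of the same goto-based unwind and ERR_PTR()-style return: each resource is acquired in order, a failure jumps to the label that releases only what was already set up, and the caller tests the returned pointer with an IS_ERR()-style check. The err_ptr()/is_err()/ptr_err() helpers and the create_mq() names are simplified stand-ins for the kernel macros, not kernel code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static inline void *err_ptr(long err) { return (void *)(intptr_t)err; }
static inline int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long ptr_err(const void *p) { return (long)(intptr_t)p; }

struct mq {
	void *buffer;
};

static struct mq *create_mq(size_t size)
{
	struct mq *mq;
	int ret;

	mq = malloc(sizeof(*mq));
	if (mq == NULL) {
		ret = -ENOMEM;
		goto out_1;
	}

	mq->buffer = calloc(1, size);	/* stands in for alloc_pages_node() */
	if (mq->buffer == NULL) {
		ret = -ENOMEM;
		goto out_2;
	}

	/* further setup steps (irq, watchlist, ...) would go here, each with
	   its own "goto out_N" on failure so teardown runs in reverse order */

	return mq;

/* something went wrong: unwind only what was already set up */
out_2:
	free(mq);
out_1:
	return err_ptr(ret);
}

int main(void)
{
	struct mq *mq = create_mq(4096);

	if (is_err(mq)) {
		fprintf(stderr, "create_mq() failed, error=%ld\n", ptr_err(mq));
		return 1;
	}
	free(mq->buffer);
	free(mq);
	return 0;
}
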
12 changes: 12 additions & 0 deletions drivers/misc/sgi-xp/xpc.h
@@ -180,6 +180,18 @@ struct xpc_vars_part_sn2 {
(XPC_RP_MACH_NASIDS(_rp) + \
xpc_nasid_mask_nlongs))

/*
* Info pertinent to a GRU message queue using a watch list for irq generation.
*/
struct xpc_gru_mq_uv {
void *address; /* address of GRU message queue */
unsigned int order; /* size of GRU message queue as a power of 2 */
int irq; /* irq raised when message is received in mq */
int mmr_blade; /* blade where watchlist was allocated from */
unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
int watchlist_num; /* number of watchlist allocated by BIOS */
};

/*
* The activate_mq is used to send/receive GRU messages that affect XPC's
* heartbeat, partition active state, and channel state. This is UV only.
259 changes: 206 additions & 53 deletions drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);

#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"

#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"

static void *xpc_activate_mq_uv;
static void *xpc_notify_mq_uv;
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
return 0;
}

static void *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
#if defined CONFIG_X86_64
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
if (mq->irq < 0) {
dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
mq->irq);
}

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode;
unsigned long mmr_value;

if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
mq->irq = SGI_XPC_ACTIVATE;
else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
mq->irq = SGI_XPC_NOTIFY;
else
return -EINVAL;

mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;

uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif

return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode;
unsigned long mmr_value;

mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mmr_value = 1UL << 16;

uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;

#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
&mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
"ret=%d\n", ret);
return ret;
}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
&mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
#else
#error not a supported configuration
#endif

mq->watchlist_num = ret;
return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;

#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
irq_handler_t irq_handler)
{
enum xp_retval xp_ret;
int ret;
int nid;
int mq_order;
int pg_order;
struct page *page;
void *mq;
struct xpc_gru_mq_uv *mq;

mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
if (mq == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
"a xpc_gru_mq_uv structure\n");
ret = -ENOMEM;
goto out_1;
}

nid = cpu_to_node(cpuid);
mq_order = get_order(mq_size);
pg_order = get_order(mq_size);
mq->order = pg_order + PAGE_SHIFT;
mq_size = 1UL << mq->order;

mq->mmr_blade = uv_cpu_to_blade_id(cpu);

nid = cpu_to_node(cpu);
page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
mq_order);
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
return NULL;
ret = -ENOMEM;
goto out_2;
}
mq->address = page_address(page);

mq = page_address(page);
ret = gru_create_message_queue(mq, mq_size);
ret = gru_create_message_queue(mq->address, mq_size);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
free_pages((unsigned long)mq, mq_order);
return NULL;
ret = -EINVAL;
goto out_3;
}

/* !!! Need to do some other things to set up IRQ */
/* enable generation of irq when GRU mq operation occurs to this mq */
ret = xpc_gru_mq_watchlist_alloc_uv(mq);
if (ret != 0)
goto out_3;

ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
if (ret != 0)
goto out_4;

ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
if (ret != 0) {
dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
irq, ret);
free_pages((unsigned long)mq, mq_order);
return NULL;
mq->irq, ret);
goto out_5;
}

/* !!! enable generation of irq when GRU mq op occurs to this mq */

/* ??? allow other partitions to access GRU mq? */
/* allow other partitions to access this GRU mq */
xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
if (xp_ret != xpSuccess) {
ret = -EACCES;
goto out_6;
}

return mq;

/* something went wrong */
out_6:
free_irq(mq->irq, NULL);
out_5:
xpc_release_gru_mq_irq_uv(mq);
out_4:
xpc_gru_mq_watchlist_free_uv(mq);
out_3:
free_pages((unsigned long)mq->address, pg_order);
out_2:
kfree(mq);
out_1:
return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
/* ??? disallow other partitions to access GRU mq? */
unsigned int mq_size;
int pg_order;
int ret;

/* disallow other partitions to access GRU mq */
mq_size = 1UL << mq->order;
ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
BUG_ON(ret != xpSuccess);

/* unregister irq handler and release mq irq/vector mapping */
free_irq(mq->irq, NULL);
xpc_release_gru_mq_irq_uv(mq);

/* !!! disable generation of irq when GRU mq op occurs to this mq */
/* disable generation of irq when GRU mq op occurs to this mq */
xpc_gru_mq_watchlist_free_uv(mq);

free_irq(irq, NULL);
pg_order = mq->order - PAGE_SHIFT;
free_pages((unsigned long)mq->address, pg_order);

free_pages((unsigned long)mq, get_order(mq_size));
kfree(mq);
}

static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
struct xpc_partition *part;
int wakeup_hb_checker = 0;

while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
while (1) {
msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
if (msg_hdr == NULL)
break;

partid = msg_hdr->partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
}
}

gru_free_message(xpc_activate_mq_uv, msg_hdr);
gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
}

if (wakeup_hb_checker)
@@ -507,7 +667,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
return 0;
}

@@ -1411,22 +1571,18 @@ xpc_init_uv(void)
return -E2BIG;
}

/* ??? The cpuid argument's value is 0, is that what we want? */
/* !!! The irq argument's value isn't correct. */
xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
XPC_ACTIVATE_IRQ_NAME,
xpc_handle_activate_IRQ_uv);
if (xpc_activate_mq_uv == NULL)
return -ENOMEM;
if (IS_ERR(xpc_activate_mq_uv))
return PTR_ERR(xpc_activate_mq_uv);

/* ??? The cpuid argument's value is 0, is that what we want? */
/* !!! The irq argument's value isn't correct. */
xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
XPC_NOTIFY_IRQ_NAME,
xpc_handle_notify_IRQ_uv);
if (xpc_notify_mq_uv == NULL) {
/* !!! The irq argument's value isn't correct. */
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
XPC_ACTIVATE_MQ_SIZE_UV, 0);
return -ENOMEM;
if (IS_ERR(xpc_notify_mq_uv)) {
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
return PTR_ERR(xpc_notify_mq_uv);
}

return 0;
@@ -1435,9 +1591,6 @@ xpc_init_uv(void)
void
xpc_exit_uv(void)
{
/* !!! The irq argument's value isn't correct. */
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);

/* !!! The irq argument's value isn't correct. */
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
