Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 348008
b: refs/heads/master
c: 637704c
h: refs/heads/master
v: v3
  • Loading branch information
Linus Torvalds committed Dec 23, 2012
1 parent 72f138d commit b532451
Show file tree
Hide file tree
Showing 4 changed files with 59 additions and 79 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 0b255e927d47b550620dfd3475ee74b0f52e09c8
refs/heads/master: 637704cbc95c02d18741b4a6e7a5d2397f8b28ce
19 changes: 8 additions & 11 deletions trunk/drivers/media/usb/uvc/uvc_ctrl.c
Original file line number Diff line number Diff line change
Expand Up @@ -1061,7 +1061,7 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,

ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
if (ctrl == NULL) {
ret = -ENOENT;
ret = -EINVAL;
goto done;
}

Expand Down Expand Up @@ -1099,13 +1099,12 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
return -ERESTARTSYS;

ctrl = uvc_find_control(chain, query_menu->id, &mapping);
if (ctrl == NULL) {
ret = -ENOENT;
if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
ret = -EINVAL;
goto done;
}

if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU ||
query_menu->index >= mapping->menu_count) {
if (query_menu->index >= mapping->menu_count) {
ret = -EINVAL;
goto done;
}
Expand Down Expand Up @@ -1264,7 +1263,7 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)

ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
if (ctrl == NULL) {
ret = -ENOENT;
ret = -EINVAL;
goto done;
}

Expand Down Expand Up @@ -1415,7 +1414,7 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,

ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -ENOENT;
return -EINVAL;

return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
Expand All @@ -1432,10 +1431,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
int ret;

ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -ENOENT;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
return -EACCES;
if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0)
return -EINVAL;

/* Clamp out of range values. */
switch (mapping->v4l2_type) {
Expand Down
12 changes: 5 additions & 7 deletions trunk/drivers/media/usb/uvc/uvc_v4l2.c
Original file line number Diff line number Diff line change
Expand Up @@ -607,10 +607,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)

ret = uvc_ctrl_get(chain, &xctrl);
uvc_ctrl_rollback(handle);
if (ret < 0)
return ret == -ENOENT ? -EINVAL : ret;

ctrl->value = xctrl.value;
if (ret >= 0)
ctrl->value = xctrl.value;
break;
}

Expand All @@ -634,7 +632,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, &xctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
return ret == -ENOENT ? -EINVAL : ret;
return ret;
}
ret = uvc_ctrl_commit(handle, &xctrl, 1);
if (ret == 0)
Expand All @@ -661,7 +659,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
uvc_ctrl_rollback(handle);
ctrls->error_idx = ret == -ENOENT
? ctrls->count : i;
return ret == -ENOENT ? -EINVAL : ret;
return ret;
}
}
ctrls->error_idx = 0;
Expand Down Expand Up @@ -691,7 +689,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ctrls->error_idx = (ret == -ENOENT &&
cmd == VIDIOC_S_EXT_CTRLS)
? ctrls->count : i;
return ret == -ENOENT ? -EINVAL : ret;
return ret;
}
}

Expand Down
105 changes: 45 additions & 60 deletions trunk/mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -2452,12 +2452,16 @@ static bool zone_balanced(struct zone *zone, int order,
}

/*
* pgdat_balanced is used when checking if a node is balanced for high-order
* allocations. Only zones that meet watermarks and are in a zone allowed
by the caller's classzone_idx are added to balanced_pages. The total of
* balanced pages must be at least 25% of the zones allowed by classzone_idx
* for the node to be considered balanced. Forcing all zones to be balanced
* for high orders can cause excessive reclaim when there are imbalanced zones.
* pgdat_balanced() is used when checking if a node is balanced.
*
* For order-0, all zones must be balanced!
*
* For high-order allocations only zones that meet watermarks and are in a
zone allowed by the caller's classzone_idx are added to balanced_pages. The
* total of balanced pages must be at least 25% of the zones allowed by
* classzone_idx for the node to be considered balanced. Forcing all zones to
* be balanced for high orders can cause excessive reclaim when there are
* imbalanced zones.
* The choice of 25% is due to
* o a 16M DMA zone that is balanced will not balance a zone on any
* reasonable sized machine
Expand All @@ -2467,17 +2471,43 @@ static bool zone_balanced(struct zone *zone, int order,
* Similarly, on x86-64 the Normal zone would need to be at least 1G
* to balance a node on its own. These seemed like reasonable ratios.
*/
static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
int classzone_idx)
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
unsigned long present_pages = 0;
unsigned long balanced_pages = 0;
int i;

for (i = 0; i <= classzone_idx; i++)
present_pages += pgdat->node_zones[i].present_pages;
/* Check the watermark levels */
for (i = 0; i <= classzone_idx; i++) {
struct zone *zone = pgdat->node_zones + i;

/* A special case here: if a zone has no pages, we consider it balanced */
return balanced_pages >= (present_pages >> 2);
if (!populated_zone(zone))
continue;

present_pages += zone->present_pages;

/*
* A special case here:
*
* balance_pgdat() skips over all_unreclaimable after
* DEF_PRIORITY. Effectively, it considers them balanced so
* they must be considered balanced here as well!
*/
if (zone->all_unreclaimable) {
balanced_pages += zone->present_pages;
continue;
}

if (zone_balanced(zone, order, 0, i))
balanced_pages += zone->present_pages;
else if (!order)
return false;
}

if (order)
return balanced_pages >= (present_pages >> 2);
else
return true;
}

/*
Expand All @@ -2489,10 +2519,6 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
int classzone_idx)
{
int i;
unsigned long balanced = 0;
bool all_zones_ok = true;

/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
if (remaining)
return false;
Expand All @@ -2511,39 +2537,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
return false;
}

/* Check the watermark levels */
for (i = 0; i <= classzone_idx; i++) {
struct zone *zone = pgdat->node_zones + i;

if (!populated_zone(zone))
continue;

/*
* balance_pgdat() skips over all_unreclaimable after
* DEF_PRIORITY. Effectively, it considers them balanced so
* they must be considered balanced here as well if kswapd
* is to sleep
*/
if (zone->all_unreclaimable) {
balanced += zone->present_pages;
continue;
}

if (!zone_balanced(zone, order, 0, i))
all_zones_ok = false;
else
balanced += zone->present_pages;
}

/*
* For high-order requests, the balanced zones must contain at least
* 25% of the node's pages for kswapd to sleep. For order-0, all zones
* must be balanced
*/
if (order)
return pgdat_balanced(pgdat, balanced, classzone_idx);
else
return all_zones_ok;
return pgdat_balanced(pgdat, order, classzone_idx);
}

/*
Expand Down Expand Up @@ -2571,7 +2565,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int *classzone_idx)
{
struct zone *unbalanced_zone;
unsigned long balanced;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
Expand Down Expand Up @@ -2605,7 +2598,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int has_under_min_watermark_zone = 0;

unbalanced_zone = NULL;
balanced = 0;

/*
* Scan in the highmem->dma direction for the highest
Expand Down Expand Up @@ -2761,8 +2753,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* speculatively avoid congestion waits
*/
zone_clear_flag(zone, ZONE_CONGESTED);
if (i <= *classzone_idx)
balanced += zone->present_pages;
}

}
Expand All @@ -2776,7 +2766,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
pfmemalloc_watermark_ok(pgdat))
wake_up(&pgdat->pfmemalloc_wait);

if (!unbalanced_zone || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
if (pgdat_balanced(pgdat, order, *classzone_idx))
break; /* kswapd: all done */
/*
* OK, kswapd is getting into trouble. Take a nap, then take
Expand All @@ -2800,12 +2790,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
} while (--sc.priority >= 0);
out:

/*
* order-0: All zones must meet high watermark for a balanced node
* high-order: Balanced zones must make up at least 25% of the node
* for the node to be balanced
*/
if (unbalanced_zone && (!order || !pgdat_balanced(pgdat, balanced, *classzone_idx))) {
if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
cond_resched();

try_to_freeze();
Expand Down

0 comments on commit b532451

Please sign in to comment.