Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 34
-rw-r--r-- | drivers/cpufreq/cpufreq_pegasusq.c | 28
-rw-r--r-- | drivers/cpufreq/powernow-k8.c | 56
-rw-r--r-- | drivers/input/keyboard/cypress/cypress-touchkey.c | 41
-rw-r--r-- | drivers/md/dm-crypt.c | 62
-rw-r--r-- | drivers/md/dm-log.c | 20
-rw-r--r-- | drivers/md/dm-mpath.c | 152
-rw-r--r-- | drivers/md/dm-snap-persistent.c | 71
-rw-r--r-- | drivers/md/dm-table.c | 60
-rw-r--r-- | drivers/md/dm.c | 61
-rw-r--r-- | drivers/md/dm.h | 2
-rw-r--r-- | drivers/tty/n_tty.c | 2
12 files changed, 365 insertions(+), 224 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index e151adc..a87dc5d 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -85,7 +85,6 @@ struct cpu_dbs_info_s { cputime64_t prev_cpu_idle; cputime64_t prev_cpu_iowait; cputime64_t prev_cpu_wall; - unsigned int prev_cpu_wall_delta; cputime64_t prev_cpu_nice; struct cpufreq_policy *cur_policy; struct delayed_work work; @@ -609,10 +608,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) unsigned int idle_time, wall_time, iowait_time; unsigned int load, load_freq; int freq_avg; - bool deep_sleep_detected = false; - /* the evil magic numbers, only 2 at least */ - const unsigned int deep_sleep_backoff = 10; - const unsigned int deep_sleep_factor = 5; j_dbs_info = &per_cpu(od_cpu_dbs_info, j); @@ -623,32 +618,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; - /* - * Ignore wall delta jitters in both directions. An - * exceptionally long wall_time will likely result - * idle but it was waken up to do work so the next - * slice is less likely to want to run at low - * frequency. Let's evaluate the next slice instead of - * the idle long one that passed already and it's too - * late to reduce in frequency. As opposed an - * exceptionally short slice that just run at low - * frequency is unlikely to be idle, but we may go - * back to idle pretty soon and that not idle slice - * already passed. If short slices will keep coming - * after a series of long slices the exponential - * backoff will converge faster and we'll react faster - * to high load. As opposed we'll decay slower - * towards low load and long idle times. - */ - if (j_dbs_info->prev_cpu_wall_delta > - wall_time * deep_sleep_factor || - j_dbs_info->prev_cpu_wall_delta * deep_sleep_factor < - wall_time) - deep_sleep_detected = true; - j_dbs_info->prev_cpu_wall_delta = - (j_dbs_info->prev_cpu_wall_delta * deep_sleep_backoff - + wall_time) / (deep_sleep_backoff+1); - idle_time = (unsigned int) cputime64_sub(cur_idle_time, j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; @@ -674,9 +643,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) idle_time += jiffies_to_usecs(cur_nice_jiffies); } - if (deep_sleep_detected) - continue; - /* * For the purpose of ondemand, waiting for disk IO is an * indication that you're performance critical, and not that diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c index 208a991..c44af54 100644 --- a/drivers/cpufreq/cpufreq_pegasusq.c +++ b/drivers/cpufreq/cpufreq_pegasusq.c @@ -165,7 +165,7 @@ static unsigned int get_nr_run_avg(void) #define DEF_START_DELAY (0) #define UP_THRESHOLD_AT_MIN_FREQ (40) -#define FREQ_FOR_RESPONSIVENESS (400000) +#define FREQ_FOR_RESPONSIVENESS (500000) #define HOTPLUG_DOWN_INDEX (0) #define HOTPLUG_UP_INDEX (1) @@ -306,7 +306,7 @@ static void apply_hotplug_lock(void) lock = atomic_read(&g_hotplug_lock); flag = lock - online; - if (flag == 0) + if (lock == 0 || flag == 0) return; work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; @@ -380,6 +380,13 @@ void cpufreq_pegasusq_min_cpu_unlock(void) lock = atomic_read(&g_hotplug_lock); if (lock == 0) return; +#if defined(CONFIG_HAS_EARLYSUSPEND) && EARLYSUSPEND_HOTPLUGLOCK + if (dbs_tuners_ins.early_suspend >= 0) { /* if LCD is off-state */ + atomic_set(&g_hotplug_lock, 1); + apply_hotplug_lock(); + return; + } +#endif flag = lock - online; if (flag >= 0) return; @@ -484,6 +491,21 @@ static ssize_t show_hotplug_lock(struct kobject *kobj, return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); } +static ssize_t show_cpucore_table(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + ssize_t count = 0; + int i; + + for (i = CONFIG_NR_CPUS; i > 0; i--) { + count += sprintf(&buf[count], "%d ", i); + } + count += sprintf(&buf[count], "\n"); + + return count; +} + + #define show_hotplug_param(file_name, num_core, up_down) \ static ssize_t show_##file_name##_##num_core##_##up_down \ (struct kobject *kobj, struct attribute *attr, char *buf) \ @@ -813,6 +835,7 @@ define_one_global_rw(max_cpu_lock); define_one_global_rw(min_cpu_lock); define_one_global_rw(hotplug_lock); define_one_global_rw(dvfs_debug); +define_one_global_ro(cpucore_table); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, @@ -846,6 +869,7 @@ static struct attribute *dbs_attributes[] = { &hotplug_rq_3_0.attr, &hotplug_rq_3_1.attr, &hotplug_rq_4_0.attr, + &cpucore_table.attr, NULL }; diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index f6cd315..ad683ec 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -32,6 +32,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/cpumask.h> +#include <linux/sched.h> /* for current / set_cpus_allowed() */ #include <linux/io.h> #include <linux/delay.h> @@ -1131,23 +1132,16 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, return res; } -struct powernowk8_target_arg { - struct cpufreq_policy *pol; - unsigned targfreq; - unsigned relation; -}; - -static long powernowk8_target_fn(void *arg) +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) { - struct powernowk8_target_arg *pta = arg; - struct cpufreq_policy *pol = pta->pol; - unsigned targfreq = pta->targfreq; - unsigned relation = pta->relation; + cpumask_var_t oldmask; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; unsigned int newstate; - int ret; + int ret = -EIO; if (!data) return -EINVAL; @@ -1155,16 +1149,29 @@ static long powernowk8_target_fn(void *arg) checkfid = data->currfid; checkvid = data->currvid; + /* only run on specific CPU from here on. 
*/ + /* This is poor form: use a workqueue or smp_call_function_single */ + if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_copy(oldmask, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); + + if (smp_processor_id() != pol->cpu) { + printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); + goto err_out; + } + if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing targ, change pending bit set\n"); - return -EIO; + goto err_out; } pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) - return -EIO; + goto err_out; if (cpu_family != CPU_HW_PSTATE) { pr_debug("targ: curr fid 0x%x, vid 0x%x\n", @@ -1182,7 +1189,7 @@ static long powernowk8_target_fn(void *arg) if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) - return -EIO; + goto err_out; mutex_lock(&fidvid_mutex); @@ -1195,8 +1202,9 @@ static long powernowk8_target_fn(void *arg) ret = transition_frequency_fidvid(data, newstate); if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); + ret = 1; mutex_unlock(&fidvid_mutex); - return 1; + goto err_out; } mutex_unlock(&fidvid_mutex); @@ -1205,18 +1213,12 @@ static long powernowk8_target_fn(void *arg) data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); + ret = 0; - return 0; -} - -/* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) -{ - struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, - .relation = relation }; - - return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); +err_out: + set_cpus_allowed_ptr(current, oldmask); + free_cpumask_var(oldmask); + return ret; } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/drivers/input/keyboard/cypress/cypress-touchkey.c b/drivers/input/keyboard/cypress/cypress-touchkey.c index d73ec15..8aafaad 100644 --- a/drivers/input/keyboard/cypress/cypress-touchkey.c +++ b/drivers/input/keyboard/cypress/cypress-touchkey.c @@ -673,6 +673,10 @@ static irqreturn_t touchkey_interrupt(int irq, void *dev_id) set_touchkey_debug('a'); + if (!atomic_read(&tkey_i2c->keypad_enable)) { + return; + } + retry = 3; while (retry--) { ret = i2c_touchkey_read(tkey_i2c->client, KEYCODE_REG, data, 3); @@ -1411,6 +1415,40 @@ static ssize_t set_touchkey_firm_status_show(struct device *dev, return count; } +static ssize_t sec_keypad_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct touchkey_i2c *tkey_i2c = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", atomic_read(&tkey_i2c->keypad_enable)); +} + +static ssize_t sec_keypad_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct touchkey_i2c *tkey_i2c = dev_get_drvdata(dev); + + unsigned int val = 0; + sscanf(buf, "%d", &val); + val = (val == 0 ? 
0 : 1); + atomic_set(&tkey_i2c->keypad_enable, val); + if (val) { + set_bit(KEY_BACK, tkey_i2c->input_dev->keybit); + set_bit(KEY_MENU, tkey_i2c->input_dev->keybit); + set_bit(KEY_HOME, tkey_i2c->input_dev->keybit); + } else { + clear_bit(KEY_BACK, tkey_i2c->input_dev->keybit); + clear_bit(KEY_MENU, tkey_i2c->input_dev->keybit); + clear_bit(KEY_HOME, tkey_i2c->input_dev->keybit); + } + input_sync(tkey_i2c->input_dev); + + return count; +} + +static DEVICE_ATTR(keypad_enable, S_IRUGO|S_IWUSR, sec_keypad_enable_show, + sec_keypad_enable_store); + static DEVICE_ATTR(recommended_version, S_IRUGO | S_IWUSR | S_IWGRP, touch_version_read, touch_version_write); static DEVICE_ATTR(updated_version, S_IRUGO | S_IWUSR | S_IWGRP, @@ -1496,6 +1534,7 @@ static struct attribute *touchkey_attributes[] = { &dev_attr_touchkey_threshold.attr, &dev_attr_autocal_enable.attr, &dev_attr_autocal_stat.attr, + &dev_attr_keypad_enable.attr, #endif NULL, }; @@ -1563,6 +1602,8 @@ static int i2c_touchkey_probe(struct i2c_client *client, set_bit(LED_MISC, input_dev->ledbit); set_bit(EV_KEY, input_dev->evbit); + atomic_set(&tkey_i2c->keypad_enable, 1); + for (i = 1; i < touchkey_count; i++) set_bit(touchkey_keycode[i], input_dev->keybit); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index da5f5f2..3e64023 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -30,7 +30,6 @@ #include <linux/device-mapper.h> #define DM_MSG_PREFIX "crypt" -#define MESG_STR(x) x, sizeof(x) /* * context holding the current state of a multi-part conversion @@ -238,7 +237,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); + *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); return 0; } @@ -247,7 +246,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); return 0; } @@ -414,7 +413,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); crypto_cipher_encrypt_one(essiv_tfm, iv, iv); return 0; @@ -1577,11 +1576,17 @@ bad_mem: static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; - unsigned int key_size; + unsigned int key_size, opt_params; unsigned long long tmpll; int ret; + struct dm_arg_set as; + const char *opt_string; + + static struct dm_arg _args[] = { + {0, 1, "Invalid number of feature args"}, + }; - if (argc != 5) { + if (argc < 5) { ti->error = "Not enough arguments"; return -EINVAL; } @@ -1650,6 +1655,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->start = tmpll; + argv += 5; + argc -= 5; + + /* Optional parameters */ + if (argc) { + as.argc = argc; + as.argv = argv; + + ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (ret) + goto bad; + + opt_string = dm_shift_arg(&as); + + if (opt_params == 1 && opt_string && + !strcasecmp(opt_string, "allow_discards")) + ti->num_discard_requests = 1; + else if (opt_params) { + ret = -EINVAL; + ti->error = "Invalid feature arguments"; + goto bad; + } + } + ret = -ENOMEM; #if 1 cc->io_queue = create_singlethread_workqueue("kcryptd_io"); @@ -1691,9 +1720,16 @@ 
static int crypt_map(struct dm_target *ti, struct bio *bio, struct dm_crypt_io *io; struct crypt_config *cc; - if (bio->bi_rw & REQ_FLUSH) { + /* + * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. + * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight + * - for REQ_DISCARD caller must use flush if IO ordering matters + */ + if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { cc = ti->private; bio->bi_bdev = cc->dev->bdev; + if (bio_sectors(bio)) + bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); return DM_MAPIO_REMAPPED; } @@ -1736,6 +1772,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); + + if (ti->num_discard_requests) + DMEMIT(" 1 allow_discards"); + break; } return 0; @@ -1779,12 +1819,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) if (argc < 2) goto error; - if (!strnicmp(argv[0], MESG_STR("key"))) { + if (!strcasecmp(argv[0], "key")) { if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { DMWARN("not suspended during key manipulation."); return -EINVAL; } - if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { + if (argc == 3 && !strcasecmp(argv[1], "set")) { ret = crypt_set_key(cc, argv[2]); if (ret) return ret; @@ -1792,7 +1832,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) ret = cc->iv_gen_ops->init(cc); return ret; } - if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { + if (argc == 2 && !strcasecmp(argv[1], "wipe")) { if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { ret = cc->iv_gen_ops->wipe(cc); if (ret) @@ -1832,7 +1872,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 948e3f4..5f06fb6 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy); #define MIRROR_DISK_VERSION 2 #define LOG_OFFSET 2 -struct log_header { - uint32_t magic; +struct log_header_disk { + __le32 magic; /* * Simple, incrementing version. no backward * compatibility. 
*/ + __le32 version; + __le64 nr_regions; +} __packed; + +struct log_header_core { + uint32_t magic; uint32_t version; - sector_t nr_regions; + uint64_t nr_regions; }; struct log_c { @@ -239,10 +245,10 @@ struct log_c { int log_dev_failed; int log_dev_flush_failed; struct dm_dev *log_dev; - struct log_header header; + struct log_header_core header; struct dm_io_region header_location; - struct log_header *disk_header; + struct log_header_disk *disk_header; }; /* @@ -271,14 +277,14 @@ static inline void log_clear_bit(struct log_c *l, /*---------------------------------------------------------------- * Header IO *--------------------------------------------------------------*/ -static void header_to_disk(struct log_header *core, struct log_header *disk) +static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk) { disk->magic = cpu_to_le32(core->magic); disk->version = cpu_to_le32(core->version); disk->nr_regions = cpu_to_le64(core->nr_regions); } -static void header_from_disk(struct log_header *core, struct log_header *disk) +static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk) { core->magic = le32_to_cpu(disk->magic); core->version = le32_to_cpu(disk->version); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 70373bf..d13c431 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -22,7 +22,6 @@ #include <asm/atomic.h> #define DM_MSG_PREFIX "multipath" -#define MESG_STR(x) x, sizeof(x) #define DM_PG_INIT_DELAY_MSECS 2000 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) @@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work) * <#paths> <#per-path selector args> * [<path> [<arg>]* ]+ ]+ *---------------------------------------------------------------*/ -struct param { - unsigned min; - unsigned max; - char *error; -}; - -static int read_param(struct param *param, char *str, unsigned *v, char **error) -{ - if (!str || - (sscanf(str, "%u", v) != 1) || - (*v < param->min) || - (*v > param->max)) { - *error = param->error; - return -EINVAL; - } - - return 0; -} - -struct arg_set { - unsigned argc; - char **argv; -}; - -static char *shift(struct arg_set *as) -{ - char *r; - - if (as->argc) { - as->argc--; - r = *as->argv; - as->argv++; - return r; - } - - return NULL; -} - -static void consume(struct arg_set *as, unsigned n) -{ - BUG_ON (as->argc < n); - as->argc -= n; - as->argv += n; -} - -static int parse_path_selector(struct arg_set *as, struct priority_group *pg, +static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct dm_target *ti) { int r; struct path_selector_type *pst; unsigned ps_argc; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 1024, "invalid number of path selector args"}, }; - pst = dm_get_path_selector(shift(as)); + pst = dm_get_path_selector(dm_shift_arg(as)); if (!pst) { ti->error = "unknown path selector type"; return -EINVAL; } - r = read_param(_params, shift(as), &ps_argc, &ti->error); + r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); if (r) { dm_put_path_selector(pst); return -EINVAL; } - if (ps_argc > as->argc) { - dm_put_path_selector(pst); - ti->error = "not enough arguments for path selector"; - return -EINVAL; - } - r = pst->create(&pg->ps, ps_argc, as->argv); if (r) { dm_put_path_selector(pst); @@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg, } pg->ps.type = pst; - consume(as, ps_argc); + dm_consume_args(as, ps_argc); return 0; } -static struct 
pgpath *parse_path(struct arg_set *as, struct path_selector *ps, +static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, struct dm_target *ti) { int r; @@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, if (!p) return ERR_PTR(-ENOMEM); - r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), + r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), &p->path.dev); if (r) { ti->error = "error getting device"; @@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, return ERR_PTR(r); } -static struct priority_group *parse_priority_group(struct arg_set *as, +static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { - static struct param _params[] = { + static struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; int r; - unsigned i, nr_selector_args, nr_params; + unsigned i, nr_selector_args, nr_args; struct priority_group *pg; struct dm_target *ti = m->ti; @@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as, /* * read the paths */ - r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); + r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); if (r) goto bad; - r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); + r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); if (r) goto bad; - nr_params = 1 + nr_selector_args; + nr_args = 1 + nr_selector_args; for (i = 0; i < pg->nr_pgpaths; i++) { struct pgpath *pgpath; - struct arg_set path_args; + struct dm_arg_set path_args; - if (as->argc < nr_params) { + if (as->argc < nr_args) { ti->error = "not enough path parameters"; r = -EINVAL; goto bad; } - path_args.argc = nr_params; + path_args.argc = nr_args; path_args.argv = as->argv; pgpath = parse_path(&path_args, &pg->ps, ti); @@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as, pgpath->pg = pg; list_add_tail(&pgpath->list, &pg->pgpaths); - consume(as, nr_params); + dm_consume_args(as, nr_args); } return pg; @@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as, return ERR_PTR(r); } -static int parse_hw_handler(struct arg_set *as, struct multipath *m) +static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) { unsigned hw_argc; int ret; struct dm_target *ti = m->ti; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; - if (read_param(_params, shift(as), &hw_argc, &ti->error)) + if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) return -EINVAL; if (!hw_argc) return 0; - if (hw_argc > as->argc) { - ti->error = "not enough arguments for hardware handler"; - return -EINVAL; - } - - m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); + m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); request_module("scsi_dh_%s", m->hw_handler_name); if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; @@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) j = sprintf(p, "%s", as->argv[i]); } - consume(as, hw_argc - 1); + dm_consume_args(as, hw_argc - 1); return 0; fail: @@ -787,52 +730,45 @@ fail: return ret; } -static int parse_features(struct arg_set *as, struct multipath *m) +static int 
parse_features(struct dm_arg_set *as, struct multipath *m) { int r; unsigned argc; struct dm_target *ti = m->ti; - const char *param_name; + const char *arg_name; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 5, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; - r = read_param(_params, shift(as), &argc, &ti->error); + r = dm_read_arg_group(_args, as, &argc, &ti->error); if (r) return -EINVAL; if (!argc) return 0; - if (argc > as->argc) { - ti->error = "not enough arguments for features"; - return -EINVAL; - } - do { - param_name = shift(as); + arg_name = dm_shift_arg(as); argc--; - if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { + if (!strcasecmp(arg_name, "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); continue; } - if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && + if (!strcasecmp(arg_name, "pg_init_retries") && (argc >= 1)) { - r = read_param(_params + 1, shift(as), - &m->pg_init_retries, &ti->error); + r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); argc--; continue; } - if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && + if (!strcasecmp(arg_name, "pg_init_delay_msecs") && (argc >= 1)) { - r = read_param(_params + 2, shift(as), - &m->pg_init_delay_msecs, &ti->error); + r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); argc--; continue; } @@ -847,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m) static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) { - /* target parameters */ - static struct param _params[] = { + /* target arguments */ + static struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; int r; struct multipath *m; - struct arg_set as; + struct dm_arg_set as; unsigned pg_count = 0; unsigned next_pg_num; @@ -876,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, if (r) goto bad; - r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); + r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); if (r) goto bad; - r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); + r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); if (r) goto bad; @@ -1510,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) } if (argc == 1) { - if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { + if (!strcasecmp(argv[0], "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); goto out; - } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { + } else if (!strcasecmp(argv[0], "fail_if_no_path")) { r = queue_if_no_path(m, 0, 0); goto out; } @@ -1524,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) goto out; } - if (!strnicmp(argv[0], MESG_STR("disable_group"))) { + if (!strcasecmp(argv[0], "disable_group")) { r = bypass_pg_num(m, argv[1], 1); goto out; - } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { + } else if (!strcasecmp(argv[0], "enable_group")) { r = bypass_pg_num(m, argv[1], 0); goto out; - } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { + } else if (!strcasecmp(argv[0], "switch_group")) { r = switch_pg_num(m, argv[1]); goto out; - } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) + } else if (!strcasecmp(argv[0], "reinstate_path")) action = reinstate_path; - else if 
(!strnicmp(argv[0], MESG_STR("fail_path"))) + else if (!strcasecmp(argv[0], "fail_path")) action = fail_path; else { DMWARN("Unrecognised multipath message received."); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index e4ecadf..39becbe 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -58,25 +58,30 @@ #define NUM_SNAPSHOT_HDR_CHUNKS 1 struct disk_header { - uint32_t magic; + __le32 magic; /* * Is this snapshot valid. There is no way of recovering * an invalid snapshot. */ - uint32_t valid; + __le32 valid; /* * Simple, incrementing version. no backward * compatibility. */ - uint32_t version; + __le32 version; /* In sectors */ - uint32_t chunk_size; -}; + __le32 chunk_size; +} __packed; struct disk_exception { + __le64 old_chunk; + __le64 new_chunk; +} __packed; + +struct core_exception { uint64_t old_chunk; uint64_t new_chunk; }; @@ -396,32 +401,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) } static void read_exception(struct pstore *ps, - uint32_t index, struct disk_exception *result) + uint32_t index, struct core_exception *result) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* copy it */ - result->old_chunk = le64_to_cpu(e->old_chunk); - result->new_chunk = le64_to_cpu(e->new_chunk); + result->old_chunk = le64_to_cpu(de->old_chunk); + result->new_chunk = le64_to_cpu(de->new_chunk); } static void write_exception(struct pstore *ps, - uint32_t index, struct disk_exception *de) + uint32_t index, struct core_exception *e) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* copy it */ - e->old_chunk = cpu_to_le64(de->old_chunk); - e->new_chunk = cpu_to_le64(de->new_chunk); + de->old_chunk = cpu_to_le64(e->old_chunk); + de->new_chunk = cpu_to_le64(e->new_chunk); } static void clear_exception(struct pstore *ps, uint32_t index) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* clear it */ - e->old_chunk = 0; - e->new_chunk = 0; + de->old_chunk = 0; + de->new_chunk = 0; } /* @@ -437,13 +442,13 @@ static int insert_exceptions(struct pstore *ps, { int r; unsigned int i; - struct disk_exception de; + struct core_exception e; /* presume the area is full */ *full = 1; for (i = 0; i < ps->exceptions_per_area; i++) { - read_exception(ps, i, &de); + read_exception(ps, i, &e); /* * If the new_chunk is pointing at the start of @@ -451,7 +456,7 @@ static int insert_exceptions(struct pstore *ps, * is we know that we've hit the end of the * exceptions. Therefore the area is not full. */ - if (de.new_chunk == 0LL) { + if (e.new_chunk == 0LL) { ps->current_committed = i; *full = 0; break; @@ -460,13 +465,13 @@ static int insert_exceptions(struct pstore *ps, /* * Keep track of the start of the free chunks. */ - if (ps->next_free <= de.new_chunk) - ps->next_free = de.new_chunk + 1; + if (ps->next_free <= e.new_chunk) + ps->next_free = e.new_chunk + 1; /* * Otherwise we add the exception to the snapshot. 
*/ - r = callback(callback_context, de.old_chunk, de.new_chunk); + r = callback(callback_context, e.old_chunk, e.new_chunk); if (r) return r; } @@ -641,12 +646,12 @@ static void persistent_commit_exception(struct dm_exception_store *store, { unsigned int i; struct pstore *ps = get_info(store); - struct disk_exception de; + struct core_exception ce; struct commit_callback *cb; - de.old_chunk = e->old_chunk; - de.new_chunk = e->new_chunk; - write_exception(ps, ps->current_committed++, &de); + ce.old_chunk = e->old_chunk; + ce.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &ce); /* * Add the callback to the back of the array. This code @@ -701,7 +706,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, chunk_t *last_new_chunk) { struct pstore *ps = get_info(store); - struct disk_exception de; + struct core_exception ce; int nr_consecutive; int r; @@ -722,9 +727,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, ps->current_committed = ps->exceptions_per_area; } - read_exception(ps, ps->current_committed - 1, &de); - *last_old_chunk = de.old_chunk; - *last_new_chunk = de.new_chunk; + read_exception(ps, ps->current_committed - 1, &ce); + *last_old_chunk = ce.old_chunk; + *last_new_chunk = ce.new_chunk; /* * Find number of consecutive chunks within the current area, @@ -733,9 +738,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, for (nr_consecutive = 1; nr_consecutive < ps->current_committed; nr_consecutive++) { read_exception(ps, ps->current_committed - 1 - nr_consecutive, - &de); - if (de.old_chunk != *last_old_chunk - nr_consecutive || - de.new_chunk != *last_new_chunk - nr_consecutive) + &ce); + if (ce.old_chunk != *last_old_chunk - nr_consecutive || + ce.new_chunk != *last_new_chunk - nr_consecutive) break; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ebdae6e..92cd686 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -539,8 +539,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, * If not we'll force DM to use PAGE_SIZE or * smaller I/O, just to be safe. */ - - if (q->merge_bvec_fn && !ti->type->merge) + if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9)); return 0; @@ -802,6 +801,63 @@ int dm_table_add_target(struct dm_table *t, const char *type, return r; } +/* + * Target argument parsing helpers. 
+ */ +static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error, unsigned grouped) +{ + const char *arg_str = dm_shift_arg(arg_set); + + if (!arg_str || + (sscanf(arg_str, "%u", value) != 1) || + (*value < arg->min) || + (*value > arg->max) || + (grouped && arg_set->argc < *value)) { + *error = arg->error; + return -EINVAL; + } + + return 0; +} + +int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error) +{ + return validate_next_arg(arg, arg_set, value, error, 0); +} +EXPORT_SYMBOL(dm_read_arg); + +int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error) +{ + return validate_next_arg(arg, arg_set, value, error, 1); +} +EXPORT_SYMBOL(dm_read_arg_group); + +const char *dm_shift_arg(struct dm_arg_set *as) +{ + char *r; + + if (as->argc) { + as->argc--; + r = *as->argv; + as->argv++; + return r; + } + + return NULL; +} +EXPORT_SYMBOL(dm_shift_arg); + +void dm_consume_args(struct dm_arg_set *as, unsigned num_args) +{ + BUG_ON(as->argc < num_args); + as->argc -= num_args; + as->argv += num_args; +} +EXPORT_SYMBOL(dm_consume_args); + static int dm_table_set_type(struct dm_table *t) { unsigned i; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9f472d5..1eeef9c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -111,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 +#define DMF_MERGE_IS_OPTIONAL 6 /* * Work processed by per-device workqueue. @@ -2018,6 +2019,59 @@ static void __set_size(struct mapped_device *md, sector_t size) } /* + * Return 1 if the queue has a compulsory merge_bvec_fn function. + * + * If this function returns 0, then the device is either a non-dm + * device without a merge_bvec_fn, or it is a dm device that is + * able to split any bios it receives that are too big. + */ +int dm_queue_merge_is_compulsory(struct request_queue *q) +{ + struct mapped_device *dev_md; + + if (!q->merge_bvec_fn) + return 0; + + if (q->make_request_fn == dm_request) { + dev_md = q->queuedata; + if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) + return 0; + } + + return 1; +} + +static int dm_device_merge_is_compulsory(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct block_device *bdev = dev->bdev; + struct request_queue *q = bdev_get_queue(bdev); + + return dm_queue_merge_is_compulsory(q); +} + +/* + * Return 1 if it is acceptable to ignore merge_bvec_fn based + * on the properties of the underlying devices. + */ +static int dm_table_merge_is_optional(struct dm_table *table) +{ + unsigned i = 0; + struct dm_target *ti; + + while (i < dm_table_get_num_targets(table)) { + ti = dm_table_get_target(table, i++); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) + return 0; + } + + return 1; +} + +/* * Returns old map, which caller must destroy. 
*/ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, @@ -2027,6 +2081,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct request_queue *q = md->queue; sector_t size; unsigned long flags; + int merge_is_optional; size = dm_table_get_size(t); @@ -2052,10 +2107,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, __bind_mempools(md, t); + merge_is_optional = dm_table_merge_is_optional(t); + write_lock_irqsave(&md->map_lock, flags); old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); + if (merge_is_optional) + set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); + else + clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); write_unlock_irqrestore(&md->map_lock, flags); return old_map; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 1aaf167..6745dbd 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); +int dm_queue_merge_is_compulsory(struct request_queue *q); + void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); void dm_set_md_type(struct mapped_device *md, unsigned type); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index d7164bf..4484d8d 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1989,7 +1989,9 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, tty->ops->flush_chars(tty); } else { while (nr > 0) { + mutex_lock(&tty->output_lock); c = tty->ops->write(tty, b, nr); + mutex_unlock(&tty->output_lock); if (c < 0) { retval = c; goto break_out; |
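
For context on the device-mapper portion of this diff, the sketch below shows how a target constructor might consume the dm_arg_set helpers exported from drivers/md/dm-table.c above (dm_shift_arg, dm_read_arg, dm_read_arg_group, dm_consume_args), following the same pattern as crypt_ctr and multipath_ctr in the patch. This is a minimal illustration, not part of the patch: the example_ctr target, its argument layout, and the "allow_discards" feature flag are borrowed from the dm-crypt hunk purely as an example.

/*
 * Illustrative only: a hypothetical target ctr parsing
 * "<dev> <offset> [<#feature args> [feature]*]" with the
 * dm_arg_set helpers added in drivers/md/dm-table.c above.
 */
#include <linux/device-mapper.h>
#include <linux/string.h>

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_arg_set as;
	unsigned opt_params;
	const char *opt_string;
	int r;

	/* Bounds and error text for the optional-argument group. */
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	/* Hand the arguments after the two fixed ones to the helpers. */
	as.argc = argc - 2;
	as.argv = argv + 2;

	if (!as.argc)
		return 0;	/* no optional feature arguments */

	/*
	 * dm_read_arg_group() also verifies that at least <opt_params>
	 * arguments remain in the set, which is why the open-coded
	 * "not enough arguments" checks could be dropped from dm-mpath.c.
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		return r;

	opt_string = dm_shift_arg(&as);
	if (opt_params == 1 && opt_string &&
	    !strcasecmp(opt_string, "allow_discards")) {
		ti->num_discard_requests = 1;	/* mirrors crypt_ctr above */
	} else if (opt_params) {
		ti->error = "Invalid feature arguments";
		return -EINVAL;
	}

	return 0;
}

The grouped variant exists because a count argument ("<#feature args>") promises that many further arguments follow; validating the remaining argc in one place lets every target drop its own bounds check, which is the bulk of the simplification visible in the dm-mpath.c and dm-crypt.c hunks.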