451 files changed, 13873 insertions, 9593 deletions
diff --git a/Documentation/keys.txt b/Documentation/keys.txt index 3115488..6304db5 100644 --- a/Documentation/keys.txt +++ b/Documentation/keys.txt @@ -860,24 +860,6 @@ The structure has a number of fields, some of which are mandatory: It is safe to sleep in this method. - (*) int (*duplicate)(struct key *key, const struct key *source); - - If this type of key can be duplicated, then this method should be - provided. It is called to copy the payload attached to the source into the - new key. The data length on the new key will have been updated and the - quota adjusted already. - - This method will be called with the source key's semaphore read-locked to - prevent its payload from being changed, thus RCU constraints need not be - applied to the source key. - - This method does not have to lock the destination key in order to attach a - payload. The fact that KEY_FLAG_INSTANTIATED is not set in key->flags - prevents anything else from gaining access to the key. - - It is safe to sleep in this method. - - (*) int (*update)(struct key *key, const void *data, size_t datalen); If this type of key can be updated, then this method should be provided. diff --git a/Documentation/md.txt b/Documentation/md.txt index 23e6cce..03a13c4 100644 --- a/Documentation/md.txt +++ b/Documentation/md.txt @@ -51,6 +51,30 @@ superblock can be autodetected and run at boot time. The kernel parameter "raid=partitionable" (or "raid=part") means that all auto-detected arrays are assembled as partitionable. +Boot time assembly of degraded/dirty arrays +------------------------------------------- + +If a raid5 or raid6 array is both dirty and degraded, it could have +undetectable data corruption. This is because the fact that it is +'dirty' means that the parity cannot be trusted, and the fact that it +is degraded means that some data blocks are missing and cannot reliably +be reconstructed (due to the lack of parity). + +For this reason, md will normally refuse to start such an array. This +requires the sysadmin to take action to explicitly start the array +despite possible corruption. This is normally done with + mdadm --assemble --force .... + +This option is not really available if the array has the root +filesystem on it. In order to support booting from such an +array, md supports a module parameter "start_dirty_degraded" which, +when set to 1, bypasses the checks and allows dirty degraded +arrays to be started. + +So, to boot with a root filesystem on a dirty degraded raid[56], use + + md-mod.start_dirty_degraded=1 + Superblock formats ------------------ @@ -141,6 +165,70 @@ All md devices contain: in a fully functional array. If this is not yet known, the file will be empty. If an array is being resized (not currently possible) this will contain the larger of the old and new sizes. + Some raid levels (RAID1) allow this value to be set while the + array is active. This will reconfigure the array. Otherwise + it can only be set while assembling an array. + + chunk_size + This is the size in bytes for 'chunks' and is only relevant to + raid levels that involve striping (1,4,5,6,10). The address space + of the array is conceptually divided into chunks and consecutive + chunks are striped onto neighbouring devices. + The size should be at least PAGE_SIZE (4k) and should be a power + of 2. This can only be set while assembling an array. + + component_size + For arrays with data redundancy (i.e.
not raid0, linear, faulty, + multipath), all components must be the same size - or at least + there must be a size that they all provide space for. This is a key + part of the geometry of the array. It is measured in sectors + and can be read from here. Writing to this value may resize + the array if the personality supports it (raid1, raid5, raid6), + and if the component drives are large enough. + + metadata_version + This indicates the format that is being used to record metadata + about the array. It can be 0.90 (traditional format), 1.0, 1.1, + 1.2 (newer format in varying locations) or "none" indicating that + the kernel isn't managing metadata at all. + + level + The raid 'level' for this array. The name will often (but not + always) be the same as the name of the module that implements the + level. To be auto-loaded the module must have an alias + md-$LEVEL e.g. md-raid5 + This can be written only while the array is being assembled, not + after it is started. + + new_dev + This file can be written but not read. The value written should + be a block device number as major:minor. e.g. 8:0 + This will cause that device to be attached to the array, if it is + available. It will then appear at md/dev-XXX (depending on the + name of the device) and further configuration is then possible. + + sync_speed_min + sync_speed_max + These are similar to /proc/sys/dev/raid/speed_limit_{min,max} + however they only apply to the particular array. + If no value has been written to these, or if the word 'system' + is written, then the system-wide value is used. If a value, + in kibibytes per second, is written, then it is used. + When the files are read, they show the currently active value + followed by "(local)" or "(system)" depending on whether it is + a locally set or system-wide value. + + sync_completed + This shows the number of sectors that have been completed of + whatever the current sync_action is, followed by the number of + sectors in total that could need to be processed. The two + numbers are separated by a '/' thus effectively showing one + value, a fraction of the process that is complete. + + sync_speed + This shows the current actual speed, in K/sec, of the current + sync_action. It is averaged over the last 30 seconds. + As component devices are added to an md array, they appear in the 'md' directory as new directories named @@ -167,6 +255,38 @@ Each directory contains: of being recovered to. This list may grow in future. + errors + An approximate count of read errors that have been detected on + this device but have not caused the device to be evicted from + the array (either because they were corrected or because they + happened while the array was read-only). When using version-1 + metadata, this value persists across restarts of the array. + + This value can be written while assembling an array, thus + providing an ongoing count for arrays with metadata managed by + userspace. + + slot + This gives the role that the device has in the array. It will + either be 'none' if the device is not active in the array + (i.e. is a spare or has failed) or an integer less than the + 'raid_disks' number for the array, indicating which position + it currently fills. This can only be set while assembling an + array. A device for which this is set is assumed to be working. + + offset + This gives the location in the device (in sectors from the + start) where data from the array will be stored. Any part of
Any part of + the device before this offset us not touched, unless it is + used for storing metadata (Formats 1.1 and 1.2). + + size + The amount of the device, after the offset, that can be used + for storage of data. This will normally be the same as the + component_size. This can be written while assembling an + array. If a value less than the current component_size is + written, component_size will be reduced to this value. + An active md device will also contain and entry for each active device in the array. These are named diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index f5ebda5..bd4ffb5 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt @@ -41,3 +41,14 @@ to. Writing to this file will accept one of It will only change to 'firmware' or 'platform' if the system supports it. +/sys/power/image_size controls the size of the image created by +the suspend-to-disk mechanism. It can be written a string +representing a non-negative integer that will be used as an upper +limit of the image size, in megabytes. The suspend-to-disk mechanism will +do its best to ensure the image size will not exceed that number. However, +if this turns out to be impossible, it will try to suspend anyway using the +smallest image possible. In particular, if "0" is written to this file, the +suspend image will be as small as possible. + +Reading from this file will display the current image size limit, which +is set to 500 MB by default. diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt index b0d5084..cd0fcd8 100644 --- a/Documentation/power/swsusp.txt +++ b/Documentation/power/swsusp.txt @@ -27,6 +27,11 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state echo platform > /sys/power/disk; echo disk > /sys/power/state +If you want to limit the suspend image size to N megabytes, do + +echo N > /sys/power/image_size + +before suspend (it is limited to 500 MB by default). Encrypted suspend image: ------------------------ diff --git a/MAINTAINERS b/MAINTAINERS index e9db0d6..cb536bb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -258,6 +258,13 @@ P: Ivan Kokshaysky M: ink@jurassic.park.msu.ru S: Maintained for 2.4; PCI support for 2.6. 
+AMD GEODE PROCESSOR/CHIPSET SUPPORT +P: Jordan Crouse +M: info-linux@geode.amd.com +L: info-linux@geode.amd.com +W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html +S: Supported + APM DRIVER P: Stephen Rothwell M: sfr@canb.auug.org.au diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 786491f9..153337f 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -40,6 +40,19 @@ config GENERIC_IOMAP bool default n +config GENERIC_HARDIRQS + bool + default y + +config GENERIC_IRQ_PROBE + bool + default y + +config AUTO_IRQ_AFFINITY + bool + depends on SMP + default y + source "init/Kconfig" diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index 24ae9a3..f3e98f8 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -175,7 +175,6 @@ EXPORT_SYMBOL(up); */ #ifdef CONFIG_SMP -EXPORT_SYMBOL(synchronize_irq); EXPORT_SYMBOL(flush_tlb_mm); EXPORT_SYMBOL(flush_tlb_range); EXPORT_SYMBOL(flush_tlb_page); diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index b6114f5..76be5cf 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -32,214 +32,25 @@ #include <asm/io.h> #include <asm/uaccess.h> -/* - * Controller mappings for all interrupt sources: - */ -irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { - [0 ... NR_IRQS-1] = { - .handler = &no_irq_type, - .lock = SPIN_LOCK_UNLOCKED - } -}; - -static void register_irq_proc(unsigned int irq); - volatile unsigned long irq_err_count; -/* - * Special irq handlers. - */ - -irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) -{ - return IRQ_NONE; -} - -/* - * Generic no controller code - */ - -static void no_irq_enable_disable(unsigned int irq) { } -static unsigned int no_irq_startup(unsigned int irq) { return 0; } - -static void -no_irq_ack(unsigned int irq) +void ack_bad_irq(unsigned int irq) { irq_err_count++; printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq); } -struct hw_interrupt_type no_irq_type = { - .typename = "none", - .startup = no_irq_startup, - .shutdown = no_irq_enable_disable, - .enable = no_irq_enable_disable, - .disable = no_irq_enable_disable, - .ack = no_irq_ack, - .end = no_irq_enable_disable, -}; - -int -handle_IRQ_event(unsigned int irq, struct pt_regs *regs, - struct irqaction *action) -{ - int status = 1; /* Force the "do bottom halves" bit */ - int ret; - - do { - if (!(action->flags & SA_INTERRUPT)) - local_irq_enable(); - else - local_irq_disable(); - - ret = action->handler(irq, action->dev_id, regs); - if (ret == IRQ_HANDLED) - status |= action->flags; - action = action->next; - } while (action); - if (status & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - local_irq_disable(); - - return status; -} - -/* - * Generic enable/disable code: this just calls - * down into the PIC-specific version for the actual - * hardware disable after having gotten the irq - * controller lock. - */ -void inline -disable_irq_nosync(unsigned int irq) -{ - irq_desc_t *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&desc->lock, flags); - if (!desc->depth++) { - desc->status |= IRQ_DISABLED; - desc->handler->disable(irq); - } - spin_unlock_irqrestore(&desc->lock, flags); -} - -/* - * Synchronous version of the above, making sure the IRQ is - * no longer running on any other IRQ.. 
- */ -void -disable_irq(unsigned int irq) -{ - disable_irq_nosync(irq); - synchronize_irq(irq); -} - -void -enable_irq(unsigned int irq) -{ - irq_desc_t *desc = irq_desc + irq; - unsigned long flags; - - spin_lock_irqsave(&desc->lock, flags); - switch (desc->depth) { - case 1: { - unsigned int status = desc->status & ~IRQ_DISABLED; - desc->status = status; - if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { - desc->status = status | IRQ_REPLAY; - hw_resend_irq(desc->handler,irq); - } - desc->handler->enable(irq); - /* fall-through */ - } - default: - desc->depth--; - break; - case 0: - printk(KERN_ERR "enable_irq() unbalanced from %p\n", - __builtin_return_address(0)); - } - spin_unlock_irqrestore(&desc->lock, flags); -} - -int -setup_irq(unsigned int irq, struct irqaction * new) -{ - int shared = 0; - struct irqaction *old, **p; - unsigned long flags; - irq_desc_t *desc = irq_desc + irq; - - if (desc->handler == &no_irq_type) - return -ENOSYS; - - /* - * Some drivers like serial.c use request_irq() heavily, - * so we have to be careful not to interfere with a - * running system. - */ - if (new->flags & SA_SAMPLE_RANDOM) { - /* - * This function might sleep, we want to call it first, - * outside of the atomic block. - * Yes, this might clear the entropy pool if the wrong - * driver is attempted to be loaded, without actually - * installing a new handler, but is this really a problem, - * only the sysadmin is able to do this. - */ - rand_initialize_irq(irq); - } - - /* - * The following block of code has to be executed atomically - */ - spin_lock_irqsave(&desc->lock,flags); - p = &desc->action; - if ((old = *p) != NULL) { - /* Can't share interrupts unless both agree to */ - if (!(old->flags & new->flags & SA_SHIRQ)) { - spin_unlock_irqrestore(&desc->lock,flags); - return -EBUSY; - } - - /* add new interrupt at end of irq queue */ - do { - p = &old->next; - old = *p; - } while (old); - shared = 1; - } - - *p = new; - - if (!shared) { - desc->depth = 0; - desc->status &= - ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS); - desc->handler->startup(irq); - } - spin_unlock_irqrestore(&desc->lock,flags); - - return 0; -} - -static struct proc_dir_entry * root_irq_dir; -static struct proc_dir_entry * irq_dir[NR_IRQS]; - #ifdef CONFIG_SMP -static struct proc_dir_entry * smp_affinity_entry[NR_IRQS]; static char irq_user_affinity[NR_IRQS]; -static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL }; -static void -select_smp_affinity(int irq) +int +select_smp_affinity(unsigned int irq) { static int last_cpu; int cpu = last_cpu + 1; - if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq]) - return; + if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq]) + return 1; while (!cpu_possible(cpu)) cpu = (cpu < (NR_CPUS-1) ? 
cpu + 1 : 0); @@ -247,208 +58,10 @@ select_smp_affinity(int irq) irq_affinity[irq] = cpumask_of_cpu(cpu); irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu)); + return 0; } - -static int -irq_affinity_read_proc (char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]); - if (count - len < 2) - return -EINVAL; - len += sprintf(page + len, "\n"); - return len; -} - -static int -irq_affinity_write_proc(struct file *file, const char __user *buffer, - unsigned long count, void *data) -{ - int irq = (long) data, full_count = count, err; - cpumask_t new_value; - - if (!irq_desc[irq].handler->set_affinity) - return -EIO; - - err = cpumask_parse(buffer, count, new_value); - - /* The special value 0 means release control of the - affinity to kernel. */ - cpus_and(new_value, new_value, cpu_online_map); - if (cpus_empty(new_value)) { - irq_user_affinity[irq] = 0; - select_smp_affinity(irq); - } - /* Do not allow disabling IRQs completely - it's a too easy - way to make the system unusable accidentally :-) At least - one online CPU still has to be targeted. */ - else { - irq_affinity[irq] = new_value; - irq_user_affinity[irq] = 1; - irq_desc[irq].handler->set_affinity(irq, new_value); - } - - return full_count; -} - #endif /* CONFIG_SMP */ -#define MAX_NAMELEN 10 - -static void -register_irq_proc (unsigned int irq) -{ - char name [MAX_NAMELEN]; - - if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) || - irq_dir[irq]) - return; - - memset(name, 0, MAX_NAMELEN); - sprintf(name, "%d", irq); - - /* create /proc/irq/1234 */ - irq_dir[irq] = proc_mkdir(name, root_irq_dir); - -#ifdef CONFIG_SMP - if (irq_desc[irq].handler->set_affinity) { - struct proc_dir_entry *entry; - /* create /proc/irq/1234/smp_affinity */ - entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); - - if (entry) { - entry->nlink = 1; - entry->data = (void *)(long)irq; - entry->read_proc = irq_affinity_read_proc; - entry->write_proc = irq_affinity_write_proc; - } - - smp_affinity_entry[irq] = entry; - } -#endif -} - -void -init_irq_proc (void) -{ - int i; - - /* create /proc/irq */ - root_irq_dir = proc_mkdir("irq", NULL); - -#ifdef CONFIG_SMP - /* create /proc/irq/prof_cpu_mask */ - create_prof_cpu_mask(root_irq_dir); -#endif - - /* - * Create entries for all existing IRQs. - */ - for (i = 0; i < ACTUAL_NR_IRQS; i++) { - if (irq_desc[i].handler == &no_irq_type) - continue; - register_irq_proc(i); - } -} - -int -request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), - unsigned long irqflags, const char * devname, void *dev_id) -{ - int retval; - struct irqaction * action; - - if (irq >= ACTUAL_NR_IRQS) - return -EINVAL; - if (!handler) - return -EINVAL; - -#if 1 - /* - * Sanity-check: shared interrupts should REALLY pass in - * a real dev-ID, otherwise we'll have trouble later trying - * to figure out which interrupt is which (messes up the - * interrupt freeing logic etc). 
- */ - if ((irqflags & SA_SHIRQ) && !dev_id) { - printk(KERN_ERR - "Bad boy: %s (at %p) called us without a dev_id!\n", - devname, __builtin_return_address(0)); - } -#endif - - action = (struct irqaction *) - kmalloc(sizeof(struct irqaction), GFP_KERNEL); - if (!action) - return -ENOMEM; - - action->handler = handler; - action->flags = irqflags; - cpus_clear(action->mask); - action->name = devname; - action->next = NULL; - action->dev_id = dev_id; - -#ifdef CONFIG_SMP - select_smp_affinity(irq); -#endif - - retval = setup_irq(irq, action); - if (retval) - kfree(action); - return retval; -} - -EXPORT_SYMBOL(request_irq); - -void -free_irq(unsigned int irq, void *dev_id) -{ - irq_desc_t *desc; - struct irqaction **p; - unsigned long flags; - - if (irq >= ACTUAL_NR_IRQS) { - printk(KERN_CRIT "Trying to free IRQ%d\n", irq); - return; - } - - desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock,flags); - p = &desc->action; - for (;;) { - struct irqaction * action = *p; - if (action) { - struct irqaction **pp = p; - p = &action->next; - if (action->dev_id != dev_id) - continue; - - /* Found - now remove it from the list of entries. */ - *pp = action->next; - if (!desc->action) { - desc->status |= IRQ_DISABLED; - desc->handler->shutdown(irq); - } - spin_unlock_irqrestore(&desc->lock,flags); - -#ifdef CONFIG_SMP - /* Wait to make sure it's not being used on - another CPU. */ - while (desc->status & IRQ_INPROGRESS) - barrier(); -#endif - kfree(action); - return; - } - printk(KERN_ERR "Trying to free free IRQ%d\n",irq); - spin_unlock_irqrestore(&desc->lock,flags); - return; - } -} - -EXPORT_SYMBOL(free_irq); - int show_interrupts(struct seq_file *p, void *v) { @@ -531,10 +144,6 @@ handle_irq(int irq, struct pt_regs * regs) * 0 return value means that this irq is already being * handled by some other CPU. (or is disabled) */ - int cpu = smp_processor_id(); - irq_desc_t *desc = irq_desc + irq; - struct irqaction * action; - unsigned int status; static unsigned int illegal_count=0; if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) { @@ -546,229 +155,8 @@ handle_irq(int irq, struct pt_regs * regs) } irq_enter(); - kstat_cpu(cpu).irqs[irq]++; - spin_lock_irq(&desc->lock); /* mask also the higher prio events */ - desc->handler->ack(irq); - /* - * REPLAY is when Linux resends an IRQ that was dropped earlier. - * WAITING is used by probe to mark irqs that are being tested. - */ - status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); - status |= IRQ_PENDING; /* we _want_ to handle it */ - - /* - * If the IRQ is disabled for whatever reason, we cannot - * use the action we have. - */ - action = NULL; - if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { - action = desc->action; - status &= ~IRQ_PENDING; /* we commit to handling */ - status |= IRQ_INPROGRESS; /* we are handling it */ - } - desc->status = status; - - /* - * If there is no IRQ handler or it was disabled, exit early. - * Since we set PENDING, if another processor is handling - * a different instance of this same irq, the other processor - * will take care of it. - */ - if (!action) - goto out; - - /* - * Edge triggered interrupts need to remember pending events. - * This applies to any hw interrupts that allow a second - * instance of the same irq to arrive while we are in handle_irq - * or in the handler. But the code here only handles the _second_ - * instance of the irq, not the third or fourth. So it is mostly - * useful for irq hardware that does not mask cleanly in an - * SMP environment. 
- */ - for (;;) { - spin_unlock(&desc->lock); - handle_IRQ_event(irq, regs, action); - spin_lock(&desc->lock); - - if (!(desc->status & IRQ_PENDING) - || (desc->status & IRQ_LEVEL)) - break; - desc->status &= ~IRQ_PENDING; - } - desc->status &= ~IRQ_INPROGRESS; -out: - /* - * The ->end() handler has to deal with interrupts which got - * disabled while the handler was running. - */ - desc->handler->end(irq); - spin_unlock(&desc->lock); - + local_irq_disable(); + __do_IRQ(irq, regs); + local_irq_enable(); irq_exit(); } - -/* - * IRQ autodetection code.. - * - * This depends on the fact that any interrupt that - * comes in on to an unassigned handler will get stuck - * with "IRQ_WAITING" cleared and the interrupt - * disabled. - */ -unsigned long -probe_irq_on(void) -{ - int i; - irq_desc_t *desc; - unsigned long delay; - unsigned long val; - - /* Something may have generated an irq long ago and we want to - flush such a longstanding irq before considering it as spurious. */ - for (i = NR_IRQS-1; i >= 0; i--) { - desc = irq_desc + i; - - spin_lock_irq(&desc->lock); - if (!irq_desc[i].action) - irq_desc[i].handler->startup(i); - spin_unlock_irq(&desc->lock); - } - - /* Wait for longstanding interrupts to trigger. */ - for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) - /* about 20ms delay */ barrier(); - - /* enable any unassigned irqs (we must startup again here because - if a longstanding irq happened in the previous stage, it may have - masked itself) first, enable any unassigned irqs. */ - for (i = NR_IRQS-1; i >= 0; i--) { - desc = irq_desc + i; - - spin_lock_irq(&desc->lock); - if (!desc->action) { - desc->status |= IRQ_AUTODETECT | IRQ_WAITING; - if (desc->handler->startup(i)) - desc->status |= IRQ_PENDING; - } - spin_unlock_irq(&desc->lock); - } - - /* - * Wait for spurious interrupts to trigger - */ - for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) - /* about 100ms delay */ barrier(); - - /* - * Now filter out any obviously spurious interrupts - */ - val = 0; - for (i=0; i<NR_IRQS; i++) { - irq_desc_t *desc = irq_desc + i; - unsigned int status; - - spin_lock_irq(&desc->lock); - status = desc->status; - - if (status & IRQ_AUTODETECT) { - /* It triggered already - consider it spurious. */ - if (!(status & IRQ_WAITING)) { - desc->status = status & ~IRQ_AUTODETECT; - desc->handler->shutdown(i); - } else - if (i < 32) - val |= 1 << i; - } - spin_unlock_irq(&desc->lock); - } - - return val; -} - -EXPORT_SYMBOL(probe_irq_on); - -/* - * Return a mask of triggered interrupts (this - * can handle only legacy ISA interrupts). - */ -unsigned int -probe_irq_mask(unsigned long val) -{ - int i; - unsigned int mask; - - mask = 0; - for (i = 0; i < NR_IRQS; i++) { - irq_desc_t *desc = irq_desc + i; - unsigned int status; - - spin_lock_irq(&desc->lock); - status = desc->status; - - if (status & IRQ_AUTODETECT) { - /* We only react to ISA interrupts */ - if (!(status & IRQ_WAITING)) { - if (i < 16) - mask |= 1 << i; - } - - desc->status = status & ~IRQ_AUTODETECT; - desc->handler->shutdown(i); - } - spin_unlock_irq(&desc->lock); - } - - return mask & val; -} - -/* - * Get the result of the IRQ probe.. A negative result means that - * we have several candidates (but we return the lowest-numbered - * one). 
- */ - -int -probe_irq_off(unsigned long val) -{ - int i, irq_found, nr_irqs; - - nr_irqs = 0; - irq_found = 0; - for (i=0; i<NR_IRQS; i++) { - irq_desc_t *desc = irq_desc + i; - unsigned int status; - - spin_lock_irq(&desc->lock); - status = desc->status; - - if (status & IRQ_AUTODETECT) { - if (!(status & IRQ_WAITING)) { - if (!nr_irqs) - irq_found = i; - nr_irqs++; - } - desc->status = status & ~IRQ_AUTODETECT; - desc->handler->shutdown(i); - } - spin_unlock_irq(&desc->lock); - } - - if (nr_irqs > 1) - irq_found = -irq_found; - return irq_found; -} - -EXPORT_SYMBOL(probe_irq_off); - -#ifdef CONFIG_SMP -void synchronize_irq(unsigned int irq) -{ - /* is there anything to synchronize with? */ - if (!irq_desc[irq].action) - return; - - while (irq_desc[irq].status & IRQ_INPROGRESS) - barrier(); -} -#endif diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c index b72e6a9..34528da 100644 --- a/arch/cris/arch-v10/kernel/kgdb.c +++ b/arch/cris/arch-v10/kernel/kgdb.c @@ -569,12 +569,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base) return x; } -int -double_this(int x) -{ - return 2 * x; -} - /********************************* Register image ****************************/ /* Copy the content of a register image into another. The size n is the size of the register image. Due to struct assignment generation of diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile index 981c2c7..422f30e 100644 --- a/arch/frv/kernel/Makefile +++ b/arch/frv/kernel/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_FUJITSU_MB93493) += irq-mb93493.o obj-$(CONFIG_PM) += pm.o cmode.o obj-$(CONFIG_MB93093_PDK) += pm-mb93093.o obj-$(CONFIG_SYSCTL) += sysctl.o +obj-$(CONFIG_FUTEX) += futex.o diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index ad10ea5..5f65483 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -1076,7 +1076,7 @@ __entry_work_notifysig: LEDS 0x6410 ori.p gr4,#0,gr8 call do_notify_resume - bra __entry_return_direct + bra __entry_resume_userspace # perform syscall entry tracing __syscall_trace_entry: diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c new file mode 100644 index 0000000..eae874a --- /dev/null +++ b/arch/frv/kernel/futex.c @@ -0,0 +1,242 @@ +/* futex.c: futex operations + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/futex.h> +#include <asm/futex.h> +#include <asm/errno.h> +#include <asm/uaccess.h> + +/* + * the various futex operations; MMU fault checking is ignored under no-MMU + * conditions + */ +static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) +{ + int oldval, ret; + + asm("0: \n" + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ + " ckeq icc3,cc7 \n" + "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ + "2: cst.p %3,%M0 ,cc3,#1 \n" + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ + " beq icc3,#0,0b \n" + " setlos 0,%2 \n" + "3: \n" + ".subsection 2 \n" + "4: setlos %5,%2 \n" + " bra 3b \n" + ".previous \n" + ".section __ex_table,\"a\" \n" + " .balign 8 \n" + " .long 1b,4b \n" + " .long 2b,4b \n" + ".previous" + : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) + : "3"(oparg), "i"(-EFAULT) + : "memory", "cc7", "cc3", "icc3" + ); + + *_oldval = oldval; + return ret; +} + +static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) +{ + int oldval, ret; + + asm("0: \n" + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ + " ckeq icc3,cc7 \n" + "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ + " add %1,%3,%3 \n" + "2: cst.p %3,%M0 ,cc3,#1 \n" + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ + " beq icc3,#0,0b \n" + " setlos 0,%2 \n" + "3: \n" + ".subsection 2 \n" + "4: setlos %5,%2 \n" + " bra 3b \n" + ".previous \n" + ".section __ex_table,\"a\" \n" + " .balign 8 \n" + " .long 1b,4b \n" + " .long 2b,4b \n" + ".previous" + : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) + : "3"(oparg), "i"(-EFAULT) + : "memory", "cc7", "cc3", "icc3" + ); + + *_oldval = oldval; + return ret; +} + +static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) +{ + int oldval, ret; + + asm("0: \n" + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ + " ckeq icc3,cc7 \n" + "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ + " or %1,%3,%3 \n" + "2: cst.p %3,%M0 ,cc3,#1 \n" + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ + " beq icc3,#0,0b \n" + " setlos 0,%2 \n" + "3: \n" + ".subsection 2 \n" + "4: setlos %5,%2 \n" + " bra 3b \n" + ".previous \n" + ".section __ex_table,\"a\" \n" + " .balign 8 \n" + " .long 1b,4b \n" + " .long 2b,4b \n" + ".previous" + : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) + : "3"(oparg), "i"(-EFAULT) + : "memory", "cc7", "cc3", "icc3" + ); + + *_oldval = oldval; + return ret; +} + +static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) +{ + int oldval, ret; + + asm("0: \n" + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ + " ckeq icc3,cc7 \n" + "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ + " and %1,%3,%3 \n" + "2: cst.p %3,%M0 ,cc3,#1 \n" + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ + " beq icc3,#0,0b \n" + " setlos 0,%2 \n" + "3: \n" + ".subsection 2 \n" + "4: setlos %5,%2 \n" + " bra 3b \n" + ".previous \n" + ".section __ex_table,\"a\" \n" + " .balign 8 \n" + " .long 1b,4b \n" + " .long 2b,4b \n" + ".previous" + : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) + : "3"(oparg), "i"(-EFAULT) + : "memory", "cc7", "cc3", "icc3" + ); + + *_oldval = oldval; + return ret; +} + +static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int 
*_oldval) +{ + int oldval, ret; + + asm("0: \n" + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ + " ckeq icc3,cc7 \n" + "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ + " xor %1,%3,%3 \n" + "2: cst.p %3,%M0 ,cc3,#1 \n" + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ + " beq icc3,#0,0b \n" + " setlos 0,%2 \n" + "3: \n" + ".subsection 2 \n" + "4: setlos %5,%2 \n" + " bra 3b \n" + ".previous \n" + ".section __ex_table,\"a\" \n" + " .balign 8 \n" + " .long 1b,4b \n" + " .long 2b,4b \n" + ".previous" + : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg) + : "3"(oparg), "i"(-EFAULT) + : "memory", "cc7", "cc3", "icc3" + ); + + *_oldval = oldval; + return ret; +} + +/*****************************************************************************/ +/* + * do the futex operations + */ +int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + inc_preempt_count(); + + switch (op) { + case FUTEX_OP_SET: + ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval); + break; + case FUTEX_OP_ADD: + ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval); + break; + case FUTEX_OP_OR: + ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval); + break; + case FUTEX_OP_ANDN: + ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval); + break; + case FUTEX_OP_XOR: + ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval); + break; + default: + ret = -ENOSYS; + break; + } + + dec_preempt_count(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; break; + } + } + + return ret; + +} /* end futex_atomic_op_inuser() */ diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index d4ccc07..5b7146f 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c @@ -35,7 +35,7 @@ struct fdpic_func_descriptor { unsigned long GOT; }; -asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); +static int do_signal(sigset_t *oldset); /* * Atomically swap in the new signal mask, and wait for a signal. @@ -55,7 +55,7 @@ asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask) while (1) { current->state = TASK_INTERRUPTIBLE; schedule(); - if (do_signal(__frame, &saveset)) + if (do_signal(&saveset)) /* return the signal number as the return value of this function * - this is an utterly evil hack. syscalls should not invoke do_signal() * as entry.S sets regs->gr8 to the return value of the system call @@ -91,7 +91,7 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) while (1) { current->state = TASK_INTERRUPTIBLE; schedule(); - if (do_signal(__frame, &saveset)) + if (do_signal(&saveset)) /* return the signal number as the return value of this function * - this is an utterly evil hack. 
syscalls should not invoke do_signal() * as entry.S sets regs->gr8 to the return value of the system call @@ -276,13 +276,12 @@ static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask) * Determine which stack to use.. */ static inline void __user *get_sigframe(struct k_sigaction *ka, - struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ - sp = regs->sp; + sp = __frame->sp; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { @@ -291,18 +290,19 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, } return (void __user *) ((sp - frame_size) & ~7UL); + } /* end get_sigframe() */ /*****************************************************************************/ /* * */ -static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) +static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) { struct sigframe __user *frame; int rsig; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ka, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; @@ -346,47 +346,51 @@ static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct p } /* set up registers for signal handler */ - regs->sp = (unsigned long) frame; - regs->lr = (unsigned long) &frame->retcode; - regs->gr8 = sig; + __frame->sp = (unsigned long) frame; + __frame->lr = (unsigned long) &frame->retcode; + __frame->gr8 = sig; if (get_personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor *) ka->sa.sa_handler; - __get_user(regs->pc, &funcptr->text); - __get_user(regs->gr15, &funcptr->GOT); + __get_user(__frame->pc, &funcptr->text); + __get_user(__frame->gr15, &funcptr->GOT); } else { - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->gr15 = 0; + __frame->pc = (unsigned long) ka->sa.sa_handler; + __frame->gr15 = 0; } set_fs(USER_DS); + /* the tracer may want to single-step inside the handler */ + if (test_thread_flag(TIF_SINGLESTEP)) + ptrace_notify(SIGTRAP); + #if DEBUG_SIG printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", - sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); + sig, current->comm, current->pid, frame, __frame->pc, + frame->pretcode); #endif - return; + return 1; give_sigsegv: - if (sig == SIGSEGV) - ka->sa.sa_handler = SIG_DFL; - force_sig(SIGSEGV, current); + return 0; + } /* end setup_frame() */ /*****************************************************************************/ /* * */ -static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs * regs) +static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set) { struct rt_sigframe __user *frame; int rsig; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ka, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; @@ -409,7 +413,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (__put_user(0, &frame->uc.uc_flags) || __put_user(0, &frame->uc.uc_link) || __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) || - __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) || + __put_user(sas_ss_flags(__frame->sp), &frame->uc.uc_stack.ss_flags) || __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size)) goto give_sigsegv; @@ -440,34 +444,38 @@ static void setup_rt_frame(int sig, 
struct k_sigaction *ka, siginfo_t *info, } /* Set up registers for signal handler */ - regs->sp = (unsigned long) frame; - regs->lr = (unsigned long) &frame->retcode; - regs->gr8 = sig; - regs->gr9 = (unsigned long) &frame->info; + __frame->sp = (unsigned long) frame; + __frame->lr = (unsigned long) &frame->retcode; + __frame->gr8 = sig; + __frame->gr9 = (unsigned long) &frame->info; if (get_personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor *funcptr = (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; - __get_user(regs->pc, &funcptr->text); - __get_user(regs->gr15, &funcptr->GOT); + __get_user(__frame->pc, &funcptr->text); + __get_user(__frame->gr15, &funcptr->GOT); } else { - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->gr15 = 0; + __frame->pc = (unsigned long) ka->sa.sa_handler; + __frame->gr15 = 0; } set_fs(USER_DS); + /* the tracer may want to single-step inside the handler */ + if (test_thread_flag(TIF_SINGLESTEP)) + ptrace_notify(SIGTRAP); + #if DEBUG_SIG printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", - sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); + sig, current->comm, current->pid, frame, __frame->pc, + frame->pretcode); #endif - return; + return 1; give_sigsegv: - if (sig == SIGSEGV) - ka->sa.sa_handler = SIG_DFL; force_sig(SIGSEGV, current); + return 0; } /* end setup_rt_frame() */ @@ -475,43 +483,51 @@ give_sigsegv: /* * OK, we're invoking a handler */ -static void handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset, - struct pt_regs *regs) +static int handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, sigset_t *oldset) { + int ret; + /* Are we from a system call? */ - if (in_syscall(regs)) { + if (in_syscall(__frame)) { /* If so, check system call restarting.. */ - switch (regs->gr8) { + switch (__frame->gr8) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: - regs->gr8 = -EINTR; + __frame->gr8 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->gr8 = -EINTR; + __frame->gr8 = -EINTR; break; } + /* fallthrough */ case -ERESTARTNOINTR: - regs->gr8 = regs->orig_gr8; - regs->pc -= 4; + __frame->gr8 = __frame->orig_gr8; + __frame->pc -= 4; } } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) - setup_rt_frame(sig, ka, info, oldset, regs); + ret = setup_rt_frame(sig, ka, info, oldset); else - setup_frame(sig, ka, oldset, regs); + ret = setup_frame(sig, ka, oldset); + + if (ret) { + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked, ¤t->blocked, + &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(¤t->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + } + + return ret; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); } /* end handle_signal() */ /*****************************************************************************/ @@ -520,7 +536,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -int do_signal(struct pt_regs *regs, sigset_t *oldset) +static int do_signal(sigset_t *oldset) { struct k_sigaction ka; siginfo_t info; @@ -532,7 +548,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) * kernel mode. Just return without doing anything * if so. 
*/ - if (!user_mode(regs)) + if (!user_mode(__frame)) return 1; if (try_to_freeze()) @@ -541,30 +557,29 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) if (!oldset) oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) { - handle_signal(signr, &info, &ka, oldset, regs); - return 1; - } + signr = get_signal_to_deliver(&info, &ka, __frame, NULL); + if (signr > 0) + return handle_signal(signr, &info, &ka, oldset); - no_signal: +no_signal: /* Did we come from a system call? */ - if (regs->syscallno >= 0) { + if (__frame->syscallno >= 0) { /* Restart the system call - no handlers present */ - if (regs->gr8 == -ERESTARTNOHAND || - regs->gr8 == -ERESTARTSYS || - regs->gr8 == -ERESTARTNOINTR) { - regs->gr8 = regs->orig_gr8; - regs->pc -= 4; + if (__frame->gr8 == -ERESTARTNOHAND || + __frame->gr8 == -ERESTARTSYS || + __frame->gr8 == -ERESTARTNOINTR) { + __frame->gr8 = __frame->orig_gr8; + __frame->pc -= 4; } - if (regs->gr8 == -ERESTART_RESTARTBLOCK){ - regs->gr8 = __NR_restart_syscall; - regs->pc -= 4; + if (__frame->gr8 == -ERESTART_RESTARTBLOCK){ + __frame->gr8 = __NR_restart_syscall; + __frame->pc -= 4; } } return 0; + } /* end do_signal() */ /*****************************************************************************/ @@ -580,6 +595,6 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags) /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) - do_signal(__frame, NULL); + do_signal(NULL); } /* end do_notify_resume() */ diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 6004bb0..968fabd 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -464,7 +464,6 @@ config NUMA depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) - select SPARSEMEM_STATIC # Need comments to help the hapless user trying to turn on NUMA support comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" @@ -493,6 +492,10 @@ config HAVE_ARCH_ALLOC_REMAP depends on NUMA default y +config ARCH_FLATMEM_ENABLE + def_bool y + depends on (ARCH_SELECT_MEMORY_MODEL && X86_PC) + config ARCH_DISCONTIGMEM_ENABLE def_bool y depends on NUMA @@ -503,7 +506,8 @@ config ARCH_DISCONTIGMEM_DEFAULT config ARCH_SPARSEMEM_ENABLE def_bool y - depends on NUMA + depends on (NUMA || (X86_PC && EXPERIMENTAL)) + select SPARSEMEM_STATIC config ARCH_SELECT_MEMORY_MODEL def_bool y diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu index 53bbb3c..79603b3 100644 --- a/arch/i386/Kconfig.cpu +++ b/arch/i386/Kconfig.cpu @@ -39,6 +39,7 @@ config M386 - "Winchip-2" for IDT Winchip 2. - "Winchip-2A" for IDT Winchips with 3dNow! capabilities. - "GeodeGX1" for Geode GX1 (Cyrix MediaGX). + - "Geode GX/LX" For AMD Geode GX and LX processors. - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). @@ -171,6 +172,11 @@ config MGEODEGX1 help Select this for a Geode GX1 (Cyrix MediaGX) chip. +config MGEODE_LX + bool "Geode GX/LX" + help + Select this for AMD Geode GX and LX processors. 
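Stepping back to the FRV futex.c hunk above: the shifts at the top of futex_atomic_op_inuser() imply a packing convention for encoded_op. Here is a self-contained sketch of that convention; the encode helper and the numeric values for FUTEX_OP_ADD and FUTEX_OP_CMP_EQ are assumptions matching linux/futex.h of this era, and the decode lines mirror the kernel code, including its reliance on arithmetic right shift for sign extension:

    #include <stdio.h>

    /* Pack op/cmp and two signed 12-bit arguments the way
     * futex_atomic_op_inuser() above unpacks them. */
    static int encode_op(int op, int cmp, int oparg, int cmparg)
    {
            return (op << 28) | (cmp << 24) |
                   ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
    }

    int main(void)
    {
            /* 1 = FUTEX_OP_ADD, 0 = FUTEX_OP_CMP_EQ (assumed values) */
            int encoded_op = encode_op(1, 0, 10, 0);

            /* Same extraction as the kernel code above. */
            int op = (encoded_op >> 28) & 7;
            int cmp = (encoded_op >> 24) & 15;
            int oparg = (encoded_op << 8) >> 20;   /* sign-extends bits 12..23 */
            int cmparg = (encoded_op << 20) >> 20; /* sign-extends bits 0..11 */

            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
            return 0;
    }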
+ config MCYRIXIII bool "CyrixIII/VIA-C3" help @@ -220,8 +226,8 @@ config X86_XADD config X86_L1_CACHE_SHIFT int default "7" if MPENTIUM4 || X86_GENERIC - default "4" if X86_ELAN || M486 || M386 - default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1 + default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 + default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX default "6" if MK7 || MK8 || MPENTIUMM config RWSEM_GENERIC_SPINLOCK @@ -290,12 +296,12 @@ config X86_INTEL_USERCOPY config X86_USE_PPRO_CHECKSUM bool - depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON + depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX default y config X86_USE_3DNOW bool - depends on MCYRIXIII || MK7 + depends on MCYRIXIII || MK7 || MGEODE_LX default y config X86_OOSTORE diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug index c48b424..bf32ecc 100644 --- a/arch/i386/Kconfig.debug +++ b/arch/i386/Kconfig.debug @@ -42,6 +42,16 @@ config DEBUG_PAGEALLOC This results in a large slowdown, but helps to find certain types of memory corruptions. +config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + depends on DEBUG_KERNEL + help + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const + data. This option may have a slight performance impact because a + portion of the kernel code won't be covered by a 2MB TLB anymore. + If in doubt, say "N". + config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on DEBUG_KERNEL diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 496a2c9..d8f94e7 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -721,7 +721,7 @@ static int __init apic_set_verbosity(char *str) apic_verbosity = APIC_VERBOSE; else printk(KERN_WARNING "APIC Verbosity level %s not recognised" - " use apic=verbose or apic=debug", str); + " use apic=verbose or apic=debug\n", str); return 0; } diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 1e60acb..2d793d4 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c @@ -303,17 +303,6 @@ extern int (*console_blank_hook)(int); #include "apm.h" /* - * Define to make all _set_limit calls use 64k limits. The APM 1.1 BIOS is - * supposed to provide limit information that it recognizes. Many machines - * do this correctly, but many others do not restrict themselves to their - * claimed limit. When this happens, they will cause a segmentation - * violation in the kernel at boot time. Most BIOS's, however, will - * respect a 64k limit, so we use that. If you want to be pedantic and - * hold your BIOS to its claims, then undefine this. - */ -#define APM_RELAX_SEGMENTS - -/* * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. 
* This patched by Chad Miller <cmiller@surfsouth.com>, original code by * David Chen <chen@ctpa04.mit.edu> @@ -1075,22 +1064,23 @@ static int apm_engage_power_management(u_short device, int enable) static int apm_console_blank(int blank) { - int error; - u_short state; + int error, i; + u_short state; + static const u_short dev[3] = { 0x100, 0x1FF, 0x101 }; state = blank ? APM_STATE_STANDBY : APM_STATE_READY; - /* Blank the first display device */ - error = set_power_state(0x100, state); - if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) { - /* try to blank them all instead */ - error = set_power_state(0x1ff, state); - if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) - /* try to blank device one instead */ - error = set_power_state(0x101, state); + + for (i = 0; i < ARRAY_SIZE(dev); i++) { + error = set_power_state(dev[i], state); + + if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) + return 1; + + if (error == APM_NOT_ENGAGED) + break; } - if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) - return 1; - if (error == APM_NOT_ENGAGED) { + + if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) { static int tried; int eng_error; if (tried++ == 0) { @@ -2233,8 +2223,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = { static int __init apm_init(void) { struct proc_dir_entry *apm_proc; + struct desc_struct *gdt; int ret; - int i; dmi_check_system(apm_dmi_table); @@ -2312,45 +2302,30 @@ static int __init apm_init(void) set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); + /* + * Set up the long jump entry point to the APM BIOS, which is called + * from inline assembly. + */ apm_bios_entry.offset = apm_info.bios.offset; apm_bios_entry.segment = APM_CS; - for (i = 0; i < NR_CPUS; i++) { - struct desc_struct *gdt = get_cpu_gdt_table(i); - set_base(gdt[APM_CS >> 3], - __va((unsigned long)apm_info.bios.cseg << 4)); - set_base(gdt[APM_CS_16 >> 3], - __va((unsigned long)apm_info.bios.cseg_16 << 4)); - set_base(gdt[APM_DS >> 3], - __va((unsigned long)apm_info.bios.dseg << 4)); -#ifndef APM_RELAX_SEGMENTS - if (apm_info.bios.version == 0x100) { -#endif - /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ - _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1); - /* For some unknown machine. */ - _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1); - /* For the DEC Hinote Ultra CT475 (and others?) */ - _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1); -#ifndef APM_RELAX_SEGMENTS - } else { - _set_limit((char *)&gdt[APM_CS >> 3], - (apm_info.bios.cseg_len - 1) & 0xffff); - _set_limit((char *)&gdt[APM_CS_16 >> 3], - (apm_info.bios.cseg_16_len - 1) & 0xffff); - _set_limit((char *)&gdt[APM_DS >> 3], - (apm_info.bios.dseg_len - 1) & 0xffff); - /* workaround for broken BIOSes */ - if (apm_info.bios.cseg_len <= apm_info.bios.offset) - _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1); - if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ - /* for the BIOS that assumes granularity = 1 */ - gdt[APM_DS >> 3].b |= 0x800000; - printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); - } - } -#endif - } + /* + * The APM 1.1 BIOS is supposed to provide limit information that it + * recognizes. Many machines do this correctly, but many others do + * not restrict themselves to their claimed limit. When this happens, + * they will cause a segmentation violation in the kernel at boot time. + * Most BIOS's, however, will respect a 64k limit, so we use that. 
+ * + * Note we only set APM segments on CPU zero, since we pin the APM + * code to that CPU. + */ + gdt = get_cpu_gdt_table(0); + set_base(gdt[APM_CS >> 3], + __va((unsigned long)apm_info.bios.cseg << 4)); + set_base(gdt[APM_CS_16 >> 3], + __va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_base(gdt[APM_DS >> 3], + __va((unsigned long)apm_info.bios.dseg << 4)); apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info); if (apm_proc) diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index e344ef8..e7697e0 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c) set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); break; } - break; + if (c->x86_model == 10) { + /* AMD Geode LX is model 10 */ + /* placeholder for any needed mods */ + break; + } + break; case 6: /* An Athlon/Duron */ /* Bit 15 of Athlon specific MSR 15, needs to be 0 diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 31e344b..cca6556 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -18,9 +18,6 @@ #include "cpu.h" -DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); -EXPORT_PER_CPU_SYMBOL(cpu_gdt_table); - DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); @@ -599,11 +596,6 @@ void __devinit cpu_init(void) load_idt(&idt_descr); /* - * Delete NT - */ - __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); - - /* * Set up and load the per-CPU TSS and LDT */ atomic_inc(&init_mm.mm_count); diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c index ff87cc2..7501597 100644 --- a/arch/i386/kernel/cpu/cyrix.c +++ b/arch/i386/kernel/cpu/cyrix.c @@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) } /* + * Handle National Semiconductor branded processors + */ +static void __devinit init_nsc(struct cpuinfo_x86 *c) +{ + /* There may be GX1 processors in the wild that are branded + * NSC and not Cyrix. + * + * This function only handles the GX processor, and kicks everything + * else to the Cyrix init function above - that should + * cover any processors that might have been branded differently + * after NSC acquired Cyrix. + * + * If this breaks your GX1 horribly, please e-mail + * info-linux@ldcmail.amd.com to tell us. + */ + + /* Handle the GX (Formerly known as the GX2) */ + + if (c->x86 == 5 && c->x86_model == 5) + display_cacheinfo(c); + else + init_cyrix(c); +} + +/* + * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected + * by the fact that they preserve the flags across the division of 5/2. + * PII and PPro exhibit this behavior too, but they have cpuid available.
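A side note on the apm_console_blank() rework in the apm.c hunk further above: it turns nested fallbacks into a table of candidate display-device ids tried in preference order. The pattern in isolation - the stub set_power_state(), the numeric APM error codes, and the 0x09 failure value are invented for this sketch, while the ids 0x100/0x1FF/0x101 and the early break on APM_NOT_ENGAGED come from the hunk:

    #include <stdio.h>

    #define APM_SUCCESS     0x00 /* illustrative values, not quoted from apm.h */
    #define APM_NO_ERROR    0x53
    #define APM_NOT_ENGAGED 0x0b

    /* Stub standing in for the real APM BIOS call: fail everything
     * except the "all displays" device, to exercise the fallback. */
    static int set_power_state(unsigned short dev, unsigned short state)
    {
            return dev == 0x1ff ? APM_SUCCESS : 0x09;
    }

    /* Blank the first display, then all displays, then display unit one,
     * stopping on success or on the one error worth special-casing. */
    static int blank_displays(unsigned short state)
    {
            static const unsigned short dev[3] = { 0x100, 0x1ff, 0x101 };
            int error, i;

            for (i = 0; i < 3; i++) {
                    error = set_power_state(dev[i], state);
                    if (error == APM_SUCCESS || error == APM_NO_ERROR)
                            return 1;
                    if (error == APM_NOT_ENGAGED)
                            break; /* caller may re-engage PM and retry */
            }
            return 0;
    }

    int main(void)
    {
            printf("blanked: %d\n", blank_displays(1 /* standby, illustrative */));
            return 0;
    }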
@@ -422,7 +447,7 @@ int __init cyrix_init_cpu(void) static struct cpu_dev nsc_cpu_dev __initdata = { .c_vendor = "NSC", .c_ident = { "Geode by NSC" }, - .c_init = init_cyrix, + .c_init = init_nsc, .c_identify = generic_identify, }; diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 13bae79..006141d 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c @@ -117,14 +117,13 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, { char __user *tmp = buf; u32 data[4]; - size_t rv; u32 reg = *ppos; int cpu = iminor(file->f_dentry->d_inode); if (count % 16) return -EINVAL; /* Invalid chunk size */ - for (rv = 0; count; count -= 16) { + for (; count; count -= 16) { do_cpuid(cpu, reg, data); if (copy_to_user(tmp, &data, 16)) return -EFAULT; diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index e50b9315..607c060 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -657,6 +657,7 @@ ENTRY(spurious_interrupt_bug) pushl $do_spurious_interrupt_bug jmp error_code +.section .rodata,"a" #include "syscall_table.S" syscall_table_size=(.-sys_call_table) diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index e437fb3..5884469 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -504,19 +504,24 @@ ENTRY(cpu_gdt_table) .quad 0x0000000000000000 /* 0x80 TSS descriptor */ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ - /* Segments used for calling PnP BIOS */ - .quad 0x00c09a0000000000 /* 0x90 32-bit code */ - .quad 0x00809a0000000000 /* 0x98 16-bit code */ - .quad 0x0080920000000000 /* 0xa0 16-bit data */ - .quad 0x0080920000000000 /* 0xa8 16-bit data */ - .quad 0x0080920000000000 /* 0xb0 16-bit data */ + /* + * Segments used for calling PnP BIOS have byte granularity. + * The code segments and data segments have fixed 64k limits; + * the transfer segment sizes are set at run time. + */ + .quad 0x00409a000000ffff /* 0x90 32-bit code */ + .quad 0x00009a000000ffff /* 0x98 16-bit code */ + .quad 0x000092000000ffff /* 0xa0 16-bit data */ + .quad 0x0000920000000000 /* 0xa8 16-bit data */ + .quad 0x0000920000000000 /* 0xb0 16-bit data */ + /* + * The APM segments have byte granularity and their bases - * and limits are set at run time. + * are set at run time. All have 64k limits.
*/ - .quad 0x00409a0000000000 /* 0xb8 APM CS code */ - .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */ - .quad 0x0040920000000000 /* 0xc8 APM DS data */ + .quad 0x00409a000000ffff /* 0xb8 APM CS code */ + .quad 0x00009a000000ffff /* 0xc0 APM CS 16 code (16 bit) */ + .quad 0x004092000000ffff /* 0xc8 APM DS data */ .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */ .quad 0x0000000000000000 /* 0xd8 - unused */ @@ -525,3 +530,5 @@ ENTRY(cpu_gdt_table) .quad 0x0000000000000000 /* 0xf0 - unused */ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ + /* Be sure this is zeroed to avoid false validations in Xen */ + .fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0 diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index 180f070..3999bec 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c @@ -3,8 +3,7 @@ #include <asm/checksum.h> #include <asm/desc.h> -/* This is definitely a GPL-only symbol */ -EXPORT_SYMBOL_GPL(cpu_gdt_table); +EXPORT_SYMBOL_GPL(cpu_gdt_descr); EXPORT_SYMBOL(__down_failed); EXPORT_SYMBOL(__down_failed_interruptible); diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 22c8675..7554f8f 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -1722,8 +1722,8 @@ void disable_IO_APIC(void) entry.dest_mode = 0; /* Physical */ entry.delivery_mode = dest_ExtINT; /* ExtInt */ entry.vector = 0; - entry.dest.physical.physical_dest = 0; - + entry.dest.physical.physical_dest = + GET_APIC_ID(apic_read(APIC_ID)); /* * Add it to the IO-APIC irq-routing table: diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index 1ca5269..91a6401 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -38,6 +38,12 @@ int smp_found_config; unsigned int __initdata maxcpus = NR_CPUS; +#ifdef CONFIG_HOTPLUG_CPU +#define CPU_HOTPLUG_ENABLED (1) +#else +#define CPU_HOTPLUG_ENABLED (0) +#endif + /* * Various Linux-internal data structures created from the * MP-table. 
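The CPU_HOTPLUG_ENABLED macro introduced above is deliberately an always-defined 0/1 constant rather than a bare #ifdef, so the mpparse.c hunk that follows can use it directly inside a C condition. A minimal sketch of the pattern; the helper below is hypothetical, for illustration only:

    /*
     * Hypothetical helper: because CPU_HOTPLUG_ENABLED is defined in both
     * configurations, both branches are parsed and type-checked, and the
     * compiler folds the constant and discards the dead branch. Code
     * hidden behind #ifdef CONFIG_HOTPLUG_CPU would get neither benefit.
     */
    static int should_default_to_bigsmp(int nr_cpus)
    {
            return CPU_HOTPLUG_ENABLED || nr_cpus > 8;
    }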
@@ -219,14 +225,18 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m) cpu_set(num_processors, cpu_possible_map); num_processors++; - if ((num_processors > 8) && - ((APIC_XAPIC(ver) && - (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) || - (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))) - def_to_bigsmp = 1; - else - def_to_bigsmp = 0; - + if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(ver)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; } diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 44470fe..1d0a55e 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c @@ -172,7 +172,6 @@ static ssize_t msr_read(struct file *file, char __user * buf, { u32 __user *tmp = (u32 __user *) buf; u32 data[2]; - size_t rv; u32 reg = *ppos; int cpu = iminor(file->f_dentry->d_inode); int err; @@ -180,7 +179,7 @@ static ssize_t msr_read(struct file *file, char __user * buf, if (count % 8) return -EINVAL; /* Invalid chunk size */ - for (rv = 0; count; count -= 8) { + for (; count; count -= 8) { err = do_rdmsr(cpu, reg, &data[0], &data[1]); if (err) return err; diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 2333aea..45e7f0a 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -308,9 +308,7 @@ void show_regs(struct pt_regs * regs) cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); - if (current_cpu_data.x86 > 4) { - cr4 = read_cr4(); - } + cr4 = read_cr4_safe(); printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); show_trace(NULL, ®s->esp); } @@ -404,17 +402,7 @@ void flush_thread(void) void release_thread(struct task_struct *dead_task) { - if (dead_task->mm) { - // temporary debugging check - if (dead_task->mm->context.size) { - printk("WARNING: dead process %8s still has LDT? 
<%p/%d>\n", - dead_task->comm, - dead_task->mm->context.ldt, - dead_task->mm->context.size); - BUG(); - } - } - + BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 2afe0f8..2fa5803 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c @@ -111,12 +111,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), }, }, - { /* Handle problems with rebooting on HP nc6120 */ + { /* Handle problems with rebooting on HP laptops */ .callback = set_bios_reboot, - .ident = "HP Compaq nc6120", + .ident = "HP Compaq Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), }, }, { } diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index fdfcb0c..27c956d 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -954,6 +954,12 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg) return 0; } +static int __init +efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg) +{ + memory_present(0, start, end); + return 0; +} /* * Find the highest page frame number we have available @@ -965,6 +971,7 @@ void __init find_max_pfn(void) max_pfn = 0; if (efi_enabled) { efi_memmap_walk(efi_find_max_pfn, &max_pfn); + efi_memmap_walk(efi_memory_present_wrapper, NULL); return; } @@ -979,6 +986,7 @@ void __init find_max_pfn(void) continue; if (end > max_pfn) max_pfn = end; + memory_present(0, start, end); } } diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 9ed449a..b3c2e2c 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -903,6 +903,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu) unsigned long start_eip; unsigned short nmi_high = 0, nmi_low = 0; + if (!cpu_gdt_descr[cpu].address && + !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { + printk("Failed to allocate GDT for CPU %d\n", cpu); + return 1; + } + ++cpucount; /* diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 9b21a31..f7ba4ac 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S @@ -1,4 +1,3 @@ -.data ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index d395e3b..47675bb 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c @@ -330,7 +330,9 @@ int recalibrate_cpu_khz(void) unsigned int cpu_khz_old = cpu_khz; if (cpu_has_tsc) { + local_irq_disable(); init_cpu_khz(); + local_irq_enable(); cpu_data[0].loops_per_jiffy = cpufreq_scale(cpu_data[0].loops_per_jiffy, cpu_khz_old, diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index ab0e943..53ad954 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -306,14 +306,17 @@ void die(const char * str, struct pt_regs * regs, long err) .lock_owner_depth = 0 }; static int die_counter; + unsigned long flags; if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); - spin_lock_irq(&die.lock); + spin_lock_irqsave(&die.lock, flags); die.lock_owner = smp_processor_id(); die.lock_owner_depth = 0; bust_spinlocks(1); } + else + local_save_flags(flags); if (++die.lock_owner_depth < 3) { int nl = 0; @@ -340,7 +343,7 @@ void die(const char * 
str, struct pt_regs * regs, long err) bust_spinlocks(0); die.lock_owner = -1; - spin_unlock_irq(&die.lock); + spin_unlock_irqrestore(&die.lock, flags); if (kexec_should_crash(current)) crash_kexec(regs); @@ -1075,9 +1078,9 @@ void __init trap_init(void) set_trap_gate(0,÷_error); set_intr_gate(1,&debug); set_intr_gate(2,&nmi); - set_system_intr_gate(3, &int3); /* int3-5 can be called from all */ + set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ set_system_gate(4,&overflow); - set_system_gate(5,&bounds); + set_trap_gate(5,&bounds); set_trap_gate(6,&invalid_op); set_trap_gate(7,&device_not_available); set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); @@ -1095,6 +1098,28 @@ void __init trap_init(void) #endif set_trap_gate(19,&simd_coprocessor_error); + if (cpu_has_fxsr) { + /* + * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. + * Generates a compile-time "error: zero width for bit-field" if + * the alignment is wrong. + */ + struct fxsrAlignAssert { + int _:!(offsetof(struct task_struct, + thread.i387.fxsave) & 15); + }; + + printk(KERN_INFO "Enabling fast FPU save and restore... "); + set_in_cr4(X86_CR4_OSFXSR); + printk("done.\n"); + } + if (cpu_has_xmm) { + printk(KERN_INFO "Enabling unmasked SIMD FPU exception " + "support... "); + set_in_cr4(X86_CR4_OSXMMEXCPT); + printk("done.\n"); + } + set_system_gate(SYSCALL_VECTOR,&system_call); /* diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 06e26f0..7df494b 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -735,6 +735,30 @@ void free_initmem(void) printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); } +#ifdef CONFIG_DEBUG_RODATA + +extern char __start_rodata, __end_rodata; +void mark_rodata_ro(void) +{ + unsigned long addr = (unsigned long)&__start_rodata; + + for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE) + change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO); + + printk ("Write protecting the kernel read-only data: %luk\n", + (unsigned long)(&__end_rodata - &__start_rodata) >> 10); + + /* + * change_page_attr() requires a global_flush_tlb() call after it. + * We do this after the printk so that if something went wrong in the + * change, the printk gets out at least to give a better debug hint + * of who is the culprit. + */ + global_flush_tlb(); +} +#endif + + #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c index f600fc2..c30a16d 100644 --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c @@ -13,6 +13,7 @@ #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/pgalloc.h> +#include <asm/sections.h> static DEFINE_SPINLOCK(cpa_lock); static struct list_head df_list = LIST_HEAD_INIT(df_list); @@ -36,7 +37,8 @@ pte_t *lookup_address(unsigned long address) return pte_offset_kernel(pmd, address); } -static struct page *split_large_page(unsigned long address, pgprot_t prot) +static struct page *split_large_page(unsigned long address, pgprot_t prot, + pgprot_t ref_prot) { int i; unsigned long addr; @@ -54,7 +56,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot) pbase = (pte_t *)page_address(base); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, - addr == address ? prot : PAGE_KERNEL)); + addr == address ? 
prot : ref_prot)); } return base; } @@ -98,11 +100,18 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) */ static inline void revert_page(struct page *kpte_page, unsigned long address) { - pte_t *linear = (pte_t *) + pgprot_t ref_prot; + pte_t *linear; + + ref_prot = + ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext) + ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE; + + linear = (pte_t *) pmd_offset(pud_offset(pgd_offset_k(address), address), address); set_pmd_pte(linear, address, pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT, - PAGE_KERNEL_LARGE)); + ref_prot)); } static int @@ -123,10 +132,16 @@ __change_page_attr(struct page *page, pgprot_t prot) if ((pte_val(*kpte) & _PAGE_PSE) == 0) { set_pte_atomic(kpte, mk_pte(page, prot)); } else { - struct page *split = split_large_page(address, prot); + pgprot_t ref_prot; + struct page *split; + + ref_prot = + ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext) + ? PAGE_KERNEL_EXEC : PAGE_KERNEL; + split = split_large_page(address, prot, ref_prot); if (!split) return -ENOMEM; - set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL)); + set_pmd_pte(kpte,address,mk_pte(split, ref_prot)); kpte_page = split; } get_page(kpte_page); diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c index 19e6f48..ee8e016 100644 --- a/arch/i386/pci/irq.c +++ b/arch/i386/pci/irq.c @@ -846,7 +846,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) * reported by the device if possible. */ newirq = dev->irq; - if (!((1 << newirq) & mask)) { + if (newirq && !((1 << newirq) & mask)) { if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev)); } diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 4d100f3..fae67bb 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -81,6 +81,12 @@ config PLAT_MAPPI2 config PLAT_MAPPI3 bool "Mappi-III(M3A-2170)" +config PLAT_M32104UT + bool "M32104UT" + help + The M3T-M32104UT is a reference board based on the uT-Engine + specification. This board has an M32104 chip.
+ endchoice choice @@ -93,6 +99,10 @@ config CHIP_M32700 config CHIP_M32102 bool "M32102" +config CHIP_M32104 + bool "M32104" + depends on PLAT_M32104UT + config CHIP_VDEC2 bool "VDEC2" @@ -115,7 +125,7 @@ config TLB_ENTRIES config ISA_M32R bool - depends on CHIP_M32102 + depends on CHIP_M32102 || CHIP_M32104 default y config ISA_M32R2 @@ -140,6 +150,7 @@ config BUS_CLOCK default "50000000" if PLAT_MAPPI3 default "50000000" if PLAT_M32700UT default "50000000" if PLAT_OPSPUT + default "54000000" if PLAT_M32104UT default "33333333" if PLAT_OAKS32R default "20000000" if PLAT_MAPPI2 @@ -157,6 +168,7 @@ config MEMORY_START default "08000000" if PLAT_USRV default "08000000" if PLAT_M32700UT default "08000000" if PLAT_OPSPUT + default "04000000" if PLAT_M32104UT default "01000000" if PLAT_OAKS32R config MEMORY_SIZE @@ -166,6 +178,7 @@ config MEMORY_SIZE default "02000000" if PLAT_USRV default "01000000" if PLAT_M32700UT default "01000000" if PLAT_OPSPUT + default "01000000" if PLAT_M32104UT default "00800000" if PLAT_OAKS32R config NOHIGHMEM @@ -174,21 +187,22 @@ config NOHIGHMEM config ARCH_DISCONTIGMEM_ENABLE bool "Internal RAM Support" - depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP + depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104 default y source "mm/Kconfig" config IRAM_START hex "Internal memory start address (hex)" - default "00f00000" - depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM + default "00f00000" if !CHIP_M32104 + default "00700000" if CHIP_M32104 + depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM config IRAM_SIZE hex "Internal memory size (hex)" - depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM + depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM default "00080000" if CHIP_M32700 - default "00010000" if CHIP_M32102 || CHIP_OPSP + default "00010000" if CHIP_M32102 || CHIP_OPSP || CHIP_M32104 default "00008000" if CHIP_VDEC2 # diff --git a/arch/m32r/boot/compressed/head.S b/arch/m32r/boot/compressed/head.S index 07cfd6a..234d8b1 100644 --- a/arch/m32r/boot/compressed/head.S +++ b/arch/m32r/boot/compressed/head.S @@ -143,6 +143,11 @@ startup: ldi r0, -2 ldi r1, 0x0100 ; invalidate stb r1, @r0 +#elif defined(CONFIG_CHIP_M32104) + /* Cache flush */ + ldi r0, -2 + ldi r1, 0x0700 ; invalidate i-cache, copy back d-cache + sth r1, @r0 #else #error "put your cache flush function, please" #endif diff --git a/arch/m32r/boot/setup.S b/arch/m32r/boot/setup.S index 5d25643..3985425 100644 --- a/arch/m32r/boot/setup.S +++ b/arch/m32r/boot/setup.S @@ -1,11 +1,10 @@ /* * linux/arch/m32r/boot/setup.S -- A setup code. 
* - * Copyright (C) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata, - * and Hitoshi Yamamoto + * Copyright (C) 2001-2005 Hiroyuki Kondo, Hirokazu Takata, + * Hitoshi Yamamoto, Hayato Fujiwara * */ -/* $Id$ */ #include <linux/linkage.h> #include <asm/segment.h> @@ -80,6 +79,20 @@ ENTRY(boot) ldi r1, #0x101 ; cache on (with invalidation) ; ldi r1, #0x00 ; cache off st r1, @r0 +#elif defined(CONFIG_CHIP_M32104) + ldi r0, #-96 ; DNCR0 + seth r1, #0x0060 ; from 0x00600000 + or3 r1, r1, #0x0005 ; size 2MB + st r1, @r0 + seth r1, #0x0100 ; from 0x01000000 + or3 r1, r1, #0x0003 ; size 16MB + st r1, @+r0 + seth r1, #0x0200 ; from 0x02000000 + or3 r1, r1, #0x0002 ; size 32MB + st r1, @+r0 + ldi r0, #-4 ;LDIMM (r0, M32R_MCCR) + ldi r1, #0x703 ; cache on (with invalidation) + st r1, @r0 #else #error unknown chip configuration #endif @@ -115,10 +128,15 @@ mmu_on: st r1, @(MATM_offset,r0) ; Set MATM (T bit ON) ld r0, @(MATM_offset,r0) ; Check #else +#if defined(CONFIG_CHIP_M32700) seth r0,#high(M32R_MCDCAR) or3 r0,r0,#low(M32R_MCDCAR) ld24 r1,#0x8080 st r1,@r0 +#elif defined(CONFIG_CHIP_M32104) + LDIMM (r2, eit_vector) ; set EVB(cr5) + mvtc r2, cr5 +#endif #endif /* CONFIG_MMU */ jmp r13 nop diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile index 6c6b6c3..5a2fa88 100644 --- a/arch/m32r/kernel/Makefile +++ b/arch/m32r/kernel/Makefile @@ -16,5 +16,6 @@ obj-$(CONFIG_PLAT_M32700UT) += setup_m32700ut.o io_m32700ut.o obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o +obj-$(CONFIG_PLAT_M32104UT) += setup_m32104ut.o io_m32104ut.o EXTRA_AFLAGS := -traditional diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 396c942..3871b65 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S @@ -315,7 +315,7 @@ ENTRY(ei_handler) mv r1, sp ; arg1(regs) #if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ - || defined(CONFIG_CHIP_OPSP) + || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) ; GET_ICU_STATUS; seth r0, #shigh(M32R_ICU_ISTS_ADDR) @@ -541,7 +541,20 @@ check_int2: bra check_end .fillinsn check_end: -#endif /* CONFIG_PLAT_OPSPUT */ +#elif defined(CONFIG_PLAT_M32104UT) + add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt + bnez r2, check_end + ; read ICU status register of PLD + seth r0, #high(PLD_ICUISTS) + or3 r0, r0, #low(PLD_ICUISTS) + lduh r0, @r0 + slli r0, #21 + srli r0, #27 ; ISN + addi r0, #(M32104UT_PLD_IRQ_BASE) + bra check_end + .fillinsn +check_end: +#endif /* CONFIG_PLAT_M32104UT */ bl do_IRQ #endif /* CONFIG_SMP */ ld r14, @sp+ @@ -651,8 +664,6 @@ ENTRY(rie_handler) /* void rie_handler(int error_code) */ SWITCH_TO_KERNEL_STACK SAVE_ALL - mvfc r0, bpc - ld r1, @r0 ldi r1, #0x20 ; error_code mv r0, sp ; pt_regs bl do_rie_handler diff --git a/arch/m32r/kernel/io_m32104ut.c b/arch/m32r/kernel/io_m32104ut.c new file mode 100644 index 0000000..d26adab --- /dev/null +++ b/arch/m32r/kernel/io_m32104ut.c @@ -0,0 +1,298 @@ +/* + * linux/arch/m32r/kernel/io_m32104ut.c + * + * Typical I/O routines for M32104UT board. 
+ * + * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata, + * Hitoshi Yamamoto, Mamoru Sakugawa, + * Naoto Sugai, Hayato Fujiwara + */ + +#include <linux/config.h> +#include <asm/m32r.h> +#include <asm/page.h> +#include <asm/io.h> +#include <asm/byteorder.h> + +#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) +#include <linux/types.h> + +#define M32R_PCC_IOMAP_SIZE 0x1000 + +#define M32R_PCC_IOSTART0 0x1000 +#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1) + +extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int); +extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int); +extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int); +extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); +#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */ + +#define PORT2ADDR(port) _port2addr(port) + +static inline void *_port2addr(unsigned long port) +{ + return (void *)(port | NONCACHE_OFFSET); +} + +#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) +static inline void *__port2addr_ata(unsigned long port) +{ + static int dummy_reg; + + switch (port) { + case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET); + case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET); + case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET); + case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET); + case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET); + case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET); + case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET); + case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET); + case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET); + default: return (void *)&dummy_reg; + } +} +#endif + +/* + * M32104T-LAN is located in the extended bus space + * from 0x01000000 to 0x01ffffff in the physical address space. + * The base address of the LAN controller (LAN91C111) is 0x300.
+ */ +#define LAN_IOSTART (0x300 | NONCACHE_OFFSET) +#define LAN_IOEND (0x320 | NONCACHE_OFFSET) +static inline void *_port2addr_ne(unsigned long port) +{ + return (void *)(port + NONCACHE_OFFSET + 0x01000000); +} + +static inline void delay(void) +{ + __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory"); +} + +/* + * NIC I/O function + */ + +#define PORT2ADDR_NE(port) _port2addr_ne(port) + +static inline unsigned char _ne_inb(void *portp) +{ + return *(volatile unsigned char *)portp; +} + +static inline unsigned short _ne_inw(void *portp) +{ + return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp); +} + +static inline void _ne_insb(void *portp, void *addr, unsigned long count) +{ + unsigned char *buf = (unsigned char *)addr; + + while (count--) + *buf++ = _ne_inb(portp); +} + +static inline void _ne_outb(unsigned char b, void *portp) +{ + *(volatile unsigned char *)portp = b; +} + +static inline void _ne_outw(unsigned short w, void *portp) +{ + *(volatile unsigned short *)portp = cpu_to_le16(w); +} + +unsigned char _inb(unsigned long port) +{ + if (port >= LAN_IOSTART && port < LAN_IOEND) + return _ne_inb(PORT2ADDR_NE(port)); + + return *(volatile unsigned char *)PORT2ADDR(port); +} + +unsigned short _inw(unsigned long port) +{ + if (port >= LAN_IOSTART && port < LAN_IOEND) + return _ne_inw(PORT2ADDR_NE(port)); + + return *(volatile unsigned short *)PORT2ADDR(port); +} + +unsigned long _inl(unsigned long port) +{ + return *(volatile unsigned long *)PORT2ADDR(port); +} + +unsigned char _inb_p(unsigned long port) +{ + unsigned char v = _inb(port); + delay(); + return (v); +} + +unsigned short _inw_p(unsigned long port) +{ + unsigned short v = _inw(port); + delay(); + return (v); +} + +unsigned long _inl_p(unsigned long port) +{ + unsigned long v = _inl(port); + delay(); + return (v); +} + +void _outb(unsigned char b, unsigned long port) +{ + if (port >= LAN_IOSTART && port < LAN_IOEND) + _ne_outb(b, PORT2ADDR_NE(port)); + else + *(volatile unsigned char *)PORT2ADDR(port) = b; +} + +void _outw(unsigned short w, unsigned long port) +{ + if (port >= LAN_IOSTART && port < LAN_IOEND) + _ne_outw(w, PORT2ADDR_NE(port)); + else + *(volatile unsigned short *)PORT2ADDR(port) = w; +} + +void _outl(unsigned long l, unsigned long port) +{ + *(volatile unsigned long *)PORT2ADDR(port) = l; +} + +void _outb_p(unsigned char b, unsigned long port) +{ + _outb(b, port); + delay(); +} + +void _outw_p(unsigned short w, unsigned long port) +{ + _outw(w, port); + delay(); +} + +void _outl_p(unsigned long l, unsigned long port) +{ + _outl(l, port); + delay(); +} + +void _insb(unsigned int port, void *addr, unsigned long count) +{ + if (port >= LAN_IOSTART && port < LAN_IOEND) + _ne_insb(PORT2ADDR_NE(port), addr, count); + else { + unsigned char *buf = addr; + unsigned char *portp = PORT2ADDR(port); + while (count--) + *buf++ = *(volatile unsigned char *)portp; + } +} + +void _insw(unsigned int port, void *addr, unsigned long count) +{ + unsigned short *buf = addr; + unsigned short *portp; + + if (port >= LAN_IOSTART && port < LAN_IOEND) { + /* + * This portion is only used by smc91111.c to read data + * from the DATA_REG. Do not swap the data. 
+ */ + portp = PORT2ADDR_NE(port); + while (count--) + *buf++ = *(volatile unsigned short *)portp; +#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) + } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { + pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short), + count, 1); +#endif +#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) + } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) { + portp = __port2addr_ata(port); + while (count--) + *buf++ = *(volatile unsigned short *)portp; +#endif + } else { + portp = PORT2ADDR(port); + while (count--) + *buf++ = *(volatile unsigned short *)portp; + } +} + +void _insl(unsigned int port, void *addr, unsigned long count) +{ + unsigned long *buf = addr; + unsigned long *portp; + + portp = PORT2ADDR(port); + while (count--) + *buf++ = *(volatile unsigned long *)portp; +} + +void _outsb(unsigned int port, const void *addr, unsigned long count) +{ + const unsigned char *buf = addr; + unsigned char *portp; + + if (port >= LAN_IOSTART && port < LAN_IOEND) { + portp = PORT2ADDR_NE(port); + while (count--) + _ne_outb(*buf++, portp); + } else { + portp = PORT2ADDR(port); + while (count--) + *(volatile unsigned char *)portp = *buf++; + } +} + +void _outsw(unsigned int port, const void *addr, unsigned long count) +{ + const unsigned short *buf = addr; + unsigned short *portp; + + if (port >= LAN_IOSTART && port < LAN_IOEND) { + /* + * This portion is only used by smc91111.c to write data + * into the DATA_REG. Do not swap the data. + */ + portp = PORT2ADDR_NE(port); + while (count--) + *(volatile unsigned short *)portp = *buf++; +#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) + } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) { + portp = __port2addr_ata(port); + while (count--) + *(volatile unsigned short *)portp = *buf++; +#endif +#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) + } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { + pcc_iowrite_word(9, port, (void *)addr, sizeof(unsigned short), + count, 1); +#endif + } else { + portp = PORT2ADDR(port); + while (count--) + *(volatile unsigned short *)portp = *buf++; + } +} + +void _outsl(unsigned int port, const void *addr, unsigned long count) +{ + const unsigned long *buf = addr; + unsigned char *portp; + + portp = PORT2ADDR(port); + while (count--) + *(volatile unsigned long *)portp = *buf++; +} diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c index eda9f96..939932d 100644 --- a/arch/m32r/kernel/io_m32700ut.c +++ b/arch/m32r/kernel/io_m32700ut.c @@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); static inline void *_port2addr(unsigned long port) { - return (void *)(port + NONCACHE_OFFSET); + return (void *)(port | NONCACHE_OFFSET); } #if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) @@ -45,15 +45,15 @@ static inline void *__port2addr_ata(unsigned long port) static int dummy_reg; switch (port) { - case 0x1f0: return (void *)0xac002000; - case 0x1f1: return (void *)0xac012800; - case 0x1f2: return (void *)0xac012002; - case 0x1f3: return (void *)0xac012802; - case 0x1f4: return (void *)0xac012004; - case 0x1f5: return (void *)0xac012804; - case 0x1f6: return (void *)0xac012006; - case 0x1f7: return (void *)0xac012806; - case 0x3f6: return (void *)0xac01200e; + case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET); + case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET); + case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET); + case 0x1f3: 
return (void *)(0x0c012802 | NONCACHE_OFFSET); + case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET); + case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET); + case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET); + case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET); + case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET); default: return (void *)&dummy_reg; } } @@ -64,8 +64,8 @@ static inline void *__port2addr_ata(unsigned long port) * from 0x10000000 to 0x13ffffff on physical address. * The base address of LAN controller(LAN91C111) is 0x300. */ -#define LAN_IOSTART 0xa0000300 -#define LAN_IOEND 0xa0000320 +#define LAN_IOSTART (0x300 | NONCACHE_OFFSET) +#define LAN_IOEND (0x320 | NONCACHE_OFFSET) static inline void *_port2addr_ne(unsigned long port) { return (void *)(port + 0x10000000); diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c index 3c3da04..a662b53 100644 --- a/arch/m32r/kernel/io_mappi.c +++ b/arch/m32r/kernel/io_mappi.c @@ -31,7 +31,7 @@ extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int); static inline void *_port2addr(unsigned long port) { - return (void *)(port | (NONCACHE_OFFSET)); + return (void *)(port | NONCACHE_OFFSET); } static inline void *_port2addr_ne(unsigned long port) diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c index df3c729..e72d725 100644 --- a/arch/m32r/kernel/io_mappi2.c +++ b/arch/m32r/kernel/io_mappi2.c @@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); static inline void *_port2addr(unsigned long port) { - return (void *)(port | (NONCACHE_OFFSET)); + return (void *)(port | NONCACHE_OFFSET); } #if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) @@ -42,22 +42,22 @@ static inline void *__port2addr_ata(unsigned long port) static int dummy_reg; switch (port) { - case 0x1f0: return (void *)0xac002000; - case 0x1f1: return (void *)0xac012800; - case 0x1f2: return (void *)0xac012002; - case 0x1f3: return (void *)0xac012802; - case 0x1f4: return (void *)0xac012004; - case 0x1f5: return (void *)0xac012804; - case 0x1f6: return (void *)0xac012006; - case 0x1f7: return (void *)0xac012806; - case 0x3f6: return (void *)0xac01200e; + case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET); + case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET); + case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET); + case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET); + case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET); + case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET); + case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET); + case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET); + case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET); default: return (void *)&dummy_reg; } } #endif -#define LAN_IOSTART 0xa0000300 -#define LAN_IOEND 0xa0000320 +#define LAN_IOSTART (0x300 | NONCACHE_OFFSET) +#define LAN_IOEND (0x320 | NONCACHE_OFFSET) #ifdef CONFIG_CHIP_OPSP static inline void *_port2addr_ne(unsigned long port) { diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c index f80321a..ed6da93 100644 --- a/arch/m32r/kernel/io_mappi3.c +++ b/arch/m32r/kernel/io_mappi3.c @@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); static inline void *_port2addr(unsigned long port) { - return (void *)(port + NONCACHE_OFFSET); + return (void *)(port | NONCACHE_OFFSET); } #if defined(CONFIG_IDE) @@ -43,33 +43,42 @@ static inline void 
*__port2addr_ata(unsigned long port) switch (port) { /* IDE0 CF */ - case 0x1f0: return (void *)0xb4002000; - case 0x1f1: return (void *)0xb4012800; - case 0x1f2: return (void *)0xb4012002; - case 0x1f3: return (void *)0xb4012802; - case 0x1f4: return (void *)0xb4012004; - case 0x1f5: return (void *)0xb4012804; - case 0x1f6: return (void *)0xb4012006; - case 0x1f7: return (void *)0xb4012806; - case 0x3f6: return (void *)0xb401200e; + case 0x1f0: return (void *)(0x14002000 | NONCACHE_OFFSET); + case 0x1f1: return (void *)(0x14012800 | NONCACHE_OFFSET); + case 0x1f2: return (void *)(0x14012002 | NONCACHE_OFFSET); + case 0x1f3: return (void *)(0x14012802 | NONCACHE_OFFSET); + case 0x1f4: return (void *)(0x14012004 | NONCACHE_OFFSET); + case 0x1f5: return (void *)(0x14012804 | NONCACHE_OFFSET); + case 0x1f6: return (void *)(0x14012006 | NONCACHE_OFFSET); + case 0x1f7: return (void *)(0x14012806 | NONCACHE_OFFSET); + case 0x3f6: return (void *)(0x1401200e | NONCACHE_OFFSET); /* IDE1 IDE */ - case 0x170: return (void *)0xb4810000; /* Data 16bit */ - case 0x171: return (void *)0xb4810002; /* Features / Error */ - case 0x172: return (void *)0xb4810004; /* Sector count */ - case 0x173: return (void *)0xb4810006; /* Sector number */ - case 0x174: return (void *)0xb4810008; /* Cylinder low */ - case 0x175: return (void *)0xb481000a; /* Cylinder high */ - case 0x176: return (void *)0xb481000c; /* Device head */ - case 0x177: return (void *)0xb481000e; /* Command */ - case 0x376: return (void *)0xb480800c; /* Device control / Alt status */ + case 0x170: /* Data 16bit */ + return (void *)(0x14810000 | NONCACHE_OFFSET); + case 0x171: /* Features / Error */ + return (void *)(0x14810002 | NONCACHE_OFFSET); + case 0x172: /* Sector count */ + return (void *)(0x14810004 | NONCACHE_OFFSET); + case 0x173: /* Sector number */ + return (void *)(0x14810006 | NONCACHE_OFFSET); + case 0x174: /* Cylinder low */ + return (void *)(0x14810008 | NONCACHE_OFFSET); + case 0x175: /* Cylinder high */ + return (void *)(0x1481000a | NONCACHE_OFFSET); + case 0x176: /* Device head */ + return (void *)(0x1481000c | NONCACHE_OFFSET); + case 0x177: /* Command */ + return (void *)(0x1481000e | NONCACHE_OFFSET); + case 0x376: /* Device control / Alt status */ + return (void *)(0x1480800c | NONCACHE_OFFSET); default: return (void *)&dummy_reg; } } #endif -#define LAN_IOSTART 0xa0000300 -#define LAN_IOEND 0xa0000320 +#define LAN_IOSTART (0x300 | NONCACHE_OFFSET) +#define LAN_IOEND (0x320 | NONCACHE_OFFSET) static inline void *_port2addr_ne(unsigned long port) { return (void *)(port + 0x10000000); diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c index 8be3239..910dd13 100644 --- a/arch/m32r/kernel/io_oaks32r.c +++ b/arch/m32r/kernel/io_oaks32r.c @@ -16,7 +16,7 @@ static inline void *_port2addr(unsigned long port) { - return (void *)(port | (NONCACHE_OFFSET)); + return (void *)(port | NONCACHE_OFFSET); } static inline void *_port2addr_ne(unsigned long port) diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c index 4793bd1..bec6929 100644 --- a/arch/m32r/kernel/io_opsput.c +++ b/arch/m32r/kernel/io_opsput.c @@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); static inline void *_port2addr(unsigned long port) { - return (void *)(port | (NONCACHE_OFFSET)); + return (void *)(port | NONCACHE_OFFSET); } /* @@ -44,8 +44,8 @@ static inline void *_port2addr(unsigned long port) * from 0x10000000 to 0x13ffffff on physical address. 
* The base address of LAN controller(LAN91C111) is 0x300. */ -#define LAN_IOSTART 0xa0000300 -#define LAN_IOEND 0xa0000320 +#define LAN_IOSTART (0x300 | NONCACHE_OFFSET) +#define LAN_IOEND (0x320 | NONCACHE_OFFSET) static inline void *_port2addr_ne(unsigned long port) { return (void *)(port + 0x10000000); diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c index f722ec8..c2e4dcc 100644 --- a/arch/m32r/kernel/setup.c +++ b/arch/m32r/kernel/setup.c @@ -320,6 +320,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #elif defined(CONFIG_CHIP_MP) seq_printf(m, "cpu family\t: M32R-MP\n" "cache size\t: I-xxKB/D-xxKB\n"); +#elif defined(CONFIG_CHIP_M32104) + seq_printf(m,"cpu family\t: M32104\n" + "cache size\t: I-8KB/D-8KB\n"); #else seq_printf(m, "cpu family\t: Unknown\n"); #endif @@ -340,6 +343,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "Machine\t\t: uServer\n"); #elif defined(CONFIG_PLAT_OAKS32R) seq_printf(m, "Machine\t\t: OAKS32R\n"); +#elif defined(CONFIG_PLAT_M32104UT) + seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n"); #else seq_printf(m, "Machine\t\t: Unknown\n"); #endif @@ -389,7 +394,7 @@ unsigned long cpu_initialized __initdata = 0; */ #if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ - || defined(CONFIG_CHIP_OPSP) + || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) void __init cpu_init (void) { int cpu_id = smp_processor_id(); diff --git a/arch/m32r/kernel/setup_m32104ut.c b/arch/m32r/kernel/setup_m32104ut.c new file mode 100644 index 0000000..6328e13 --- /dev/null +++ b/arch/m32r/kernel/setup_m32104ut.c @@ -0,0 +1,156 @@ +/* + * linux/arch/m32r/kernel/setup_m32104ut.c + * + * Setup routines for M32104UT Board + * + * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata, + * Hitoshi Yamamoto, Mamoru Sakugawa, + * Naoto Sugai, Hayato Fujiwara + */ + +#include <linux/config.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/device.h> + +#include <asm/system.h> +#include <asm/m32r.h> +#include <asm/io.h> + +#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) + +icu_data_t icu_data[NR_IRQS]; + +static void disable_m32104ut_irq(unsigned int irq) +{ + unsigned long port, data; + + port = irq2port(irq); + data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; + outl(data, port); +} + +static void enable_m32104ut_irq(unsigned int irq) +{ + unsigned long port, data; + + port = irq2port(irq); + data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; + outl(data, port); +} + +static void mask_and_ack_m32104ut(unsigned int irq) +{ + disable_m32104ut_irq(irq); +} + +static void end_m32104ut_irq(unsigned int irq) +{ + enable_m32104ut_irq(irq); +} + +static unsigned int startup_m32104ut_irq(unsigned int irq) +{ + enable_m32104ut_irq(irq); + return (0); +} + +static void shutdown_m32104ut_irq(unsigned int irq) +{ + unsigned long port; + + port = irq2port(irq); + outl(M32R_ICUCR_ILEVEL7, port); +} + +static struct hw_interrupt_type m32104ut_irq_type = +{ + .typename = "M32104UT-IRQ", + .startup = startup_m32104ut_irq, + .shutdown = shutdown_m32104ut_irq, + .enable = enable_m32104ut_irq, + .disable = disable_m32104ut_irq, + .ack = mask_and_ack_m32104ut, + .end = end_m32104ut_irq +}; + +void __init init_IRQ(void) +{ + static int once = 0; + + if (once) + return; + else + once++; + +#if defined(CONFIG_SMC91X) + /* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/ + 
irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED; + irq_desc[M32R_IRQ_INT0].handler = &m32104ut_irq_type; + irq_desc[M32R_IRQ_INT0].action = 0; + irq_desc[M32R_IRQ_INT0].depth = 1; + icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */ + disable_m32104ut_irq(M32R_IRQ_INT0); +#endif /* CONFIG_SMC91X */ + + /* MFT2 : system timer */ + irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED; + irq_desc[M32R_IRQ_MFT2].handler = &m32104ut_irq_type; + irq_desc[M32R_IRQ_MFT2].action = 0; + irq_desc[M32R_IRQ_MFT2].depth = 1; + icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; + disable_m32104ut_irq(M32R_IRQ_MFT2); + +#ifdef CONFIG_SERIAL_M32R_SIO + /* SIO0_R : uart receive data */ + irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED; + irq_desc[M32R_IRQ_SIO0_R].handler = &m32104ut_irq_type; + irq_desc[M32R_IRQ_SIO0_R].action = 0; + irq_desc[M32R_IRQ_SIO0_R].depth = 1; + icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN; + disable_m32104ut_irq(M32R_IRQ_SIO0_R); + + /* SIO0_S : uart send data */ + irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED; + irq_desc[M32R_IRQ_SIO0_S].handler = &m32104ut_irq_type; + irq_desc[M32R_IRQ_SIO0_S].action = 0; + irq_desc[M32R_IRQ_SIO0_S].depth = 1; + icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN; + disable_m32104ut_irq(M32R_IRQ_SIO0_S); +#endif /* CONFIG_SERIAL_M32R_SIO */ +} + +#if defined(CONFIG_SMC91X) + +#define LAN_IOSTART 0x300 +#define LAN_IOEND 0x320 +static struct resource smc91x_resources[] = { + [0] = { + .start = (LAN_IOSTART), + .end = (LAN_IOEND), + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = M32R_IRQ_INT0, + .end = M32R_IRQ_INT0, + .flags = IORESOURCE_IRQ, + } +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; +#endif + +static int __init platform_init(void) +{ +#if defined(CONFIG_SMC91X) + platform_device_register(&smc91x_device); +#endif + return 0; +} +arch_initcall(platform_init); diff --git a/arch/m32r/kernel/setup_m32700ut.c b/arch/m32r/kernel/setup_m32700ut.c index cb76916..fad1fc9 100644 --- a/arch/m32r/kernel/setup_m32700ut.c +++ b/arch/m32r/kernel/setup_m32700ut.c @@ -26,15 +26,7 @@ */ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; -#else icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; -#endif /* CONFIG_SMP */ - static void disable_m32700ut_irq(unsigned int irq) { diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c index 501d798..00f2532 100644 --- a/arch/m32r/kernel/setup_mappi.c +++ b/arch/m32r/kernel/setup_mappi.c @@ -19,12 +19,6 @@ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -#endif /* CONFIG_SMP */ - icu_data_t icu_data[NR_IRQS]; static void disable_mappi_irq(unsigned int irq) diff --git a/arch/m32r/kernel/setup_mappi2.c b/arch/m32r/kernel/setup_mappi2.c index 7f2db5b..eebc9d8 100644 --- a/arch/m32r/kernel/setup_mappi2.c +++ b/arch/m32r/kernel/setup_mappi2.c @@ -19,12 +19,6 @@ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -#endif /* CONFIG_SMP */ - icu_data_t icu_data[NR_IRQS]; static void disable_mappi2_irq(unsigned int irq) 
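The setup_*.c hunks above and below all delete per-board copies of the icu_data_t typedef. The shared definition they now rely on is not shown in this diff; presumably it is something like the following, living once in a common m32r header (the exact header location is an assumption, not confirmed here):

    /*
     * Assumed shared typedef, formerly duplicated in each board setup
     * file; its real location in the tree is not visible in this diff.
     */
    typedef struct {
            unsigned long icucr;    /* ICU Control Register */
    } icu_data_t;

Each board file then keeps only its own array definition, such as icu_data_t icu_data[NR_IRQS], as the hunks show.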
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c index f6ecdf7..d2ff021 100644 --- a/arch/m32r/kernel/setup_mappi3.c +++ b/arch/m32r/kernel/setup_mappi3.c @@ -19,12 +19,6 @@ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -#endif /* CONFIG_SMP */ - icu_data_t icu_data[NR_IRQS]; static void disable_mappi3_irq(unsigned int irq) diff --git a/arch/m32r/kernel/setup_oaks32r.c b/arch/m32r/kernel/setup_oaks32r.c index 45add5b..0e9e635 100644 --- a/arch/m32r/kernel/setup_oaks32r.c +++ b/arch/m32r/kernel/setup_oaks32r.c @@ -18,12 +18,6 @@ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -#endif /* CONFIG_SMP */ - icu_data_t icu_data[NR_IRQS]; static void disable_oaks32r_irq(unsigned int irq) diff --git a/arch/m32r/kernel/setup_opsput.c b/arch/m32r/kernel/setup_opsput.c index 1fbb140..548e8fc 100644 --- a/arch/m32r/kernel/setup_opsput.c +++ b/arch/m32r/kernel/setup_opsput.c @@ -27,15 +27,7 @@ */ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#ifndef CONFIG_SMP -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ]; -#else icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ]; -#endif /* CONFIG_SMP */ - static void disable_opsput_irq(unsigned int irq) { diff --git a/arch/m32r/kernel/setup_usrv.c b/arch/m32r/kernel/setup_usrv.c index 634741b..64be659 100644 --- a/arch/m32r/kernel/setup_usrv.c +++ b/arch/m32r/kernel/setup_usrv.c @@ -18,12 +18,6 @@ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) -#if !defined(CONFIG_SMP) -typedef struct { - unsigned long icucr; /* ICU Control Register */ -} icu_data_t; -#endif /* CONFIG_SMP */ - icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; static void disable_mappi_irq(unsigned int irq) diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 2ebce20..b8e68b5 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -57,7 +57,7 @@ static unsigned long do_gettimeoffset(void) #if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \ - || defined(CONFIG_CHIP_OPSP) + || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) #ifndef CONFIG_SMP unsigned long count; @@ -268,7 +268,7 @@ void __init time_init(void) #if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \ - || defined(CONFIG_CHIP_OPSP) + || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) /* M32102 MFT setup */ setup_irq(M32R_IRQ_MFT2, &irq0); diff --git a/arch/m32r/m32104ut/defconfig.m32104ut b/arch/m32r/m32104ut/defconfig.m32104ut new file mode 100644 index 0000000..454de33 --- /dev/null +++ b/arch/m32r/m32104ut/defconfig.m32104ut @@ -0,0 +1,657 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.14 +# Wed Nov 9 16:04:51 2005 +# +CONFIG_M32R=y +# CONFIG_UID16 is not set +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +# CONFIG_POSIX_MQUEUE is not set +# 
CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_HOTPLUG=y +# CONFIG_KOBJECT_UEVENT is not set +# CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y +# CONFIG_FUTEX is not set +# CONFIG_EPOLL is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +CONFIG_TINY_SHMEM=y +CONFIG_BASE_SMALL=0 + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor type and features +# +# CONFIG_PLAT_MAPPI is not set +# CONFIG_PLAT_USRV is not set +# CONFIG_PLAT_M32700UT is not set +# CONFIG_PLAT_OPSPUT is not set +# CONFIG_PLAT_OAKS32R is not set +# CONFIG_PLAT_MAPPI2 is not set +# CONFIG_PLAT_MAPPI3 is not set +CONFIG_PLAT_M32104UT=y +# CONFIG_CHIP_M32700 is not set +# CONFIG_CHIP_M32102 is not set +CONFIG_CHIP_M32104=y +# CONFIG_CHIP_VDEC2 is not set +# CONFIG_CHIP_OPSP is not set +CONFIG_ISA_M32R=y +CONFIG_BUS_CLOCK=54000000 +CONFIG_TIMER_DIVIDE=128 +# CONFIG_CPU_LITTLE_ENDIAN is not set +CONFIG_MEMORY_START=04000000 +CONFIG_MEMORY_SIZE=01000000 +CONFIG_NOHIGHMEM=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_GENERIC_CALIBRATE_DELAY=y +# CONFIG_PREEMPT is not set +# CONFIG_SMP is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +# CONFIG_ISA is not set + +# +# PCCARD (PCMCIA/CardBus) support +# +CONFIG_PCCARD=y +# CONFIG_PCMCIA_DEBUG is not set +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_PCMCIA_IOCTL=y + +# +# PC-card bridges +# + +# +# PCI Hotplug Support +# + +# +# Executable file formats +# +CONFIG_BINFMT_FLAT=y +# CONFIG_BINFMT_ZFLAT is not set +# CONFIG_BINFMT_SHARED_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# 
CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_DEBUG_DRIVER is not set + +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_NBD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# CONFIG_ATA_OVER_ETH is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SMC91X=y +# CONFIG_NE2000 is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_M32R_SIO=y +CONFIG_SERIAL_M32R_SIO_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=y +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Hardware Monitoring support +# +# CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set + +# +# Misc devices +# + +# +# Multimedia Capabilities Port drivers +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting 
Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB_ARCH_HAS_HCD is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# MMC/SD Card support +# +# CONFIG_MMC is not set + +# +# InfiniBand support +# + +# +# SN Devices +# + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=932 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_CRAMFS=y +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set 
+# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_FS is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Hardware crypto devices +# + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c index 31b0789..9f54dd9 100644 --- a/arch/m32r/mm/cache.c +++ b/arch/m32r/mm/cache.c @@ -1,7 +1,7 @@ /* * linux/arch/m32r/mm/cache.c * - * Copyright (C) 2002 Hirokazu Takata + * Copyright (C) 2002-2005 Hirokazu Takata, Hayato Fujiwara */ #include <linux/config.h> @@ -9,7 +9,8 @@ #undef MCCR -#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP) +#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) \ + || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP) /* Cache Control Register */ #define MCCR ((volatile unsigned long*)0xfffffffc) #define MCCR_CC (1UL << 7) /* Cache mode modify bit */ @@ -26,7 +27,17 @@ #define MCCR ((volatile unsigned char*)0xfffffffe) #define MCCR_IIV (1UL << 0) /* I-cache invalidate */ #define MCCR_ICACHE_INV MCCR_IIV -#endif /* CONFIG_CHIP_XNUX2 || CONFIG_CHIP_M32700 */ +#elif defined(CONFIG_CHIP_M32104) +#define MCCR ((volatile unsigned short*)0xfffffffe) +#define MCCR_IIV (1UL << 8) /* I-cache invalidate */ +#define MCCR_DIV (1UL << 9) /* D-cache invalidate */ +#define MCCR_DCB (1UL << 10) /* D-cache copy back */ +#define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */ +#define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */ +#define MCCR_ICACHE_INV MCCR_IIV +#define MCCR_DCACHE_CB MCCR_DCB +#define MCCR_DCACHE_CBINV (MCCR_DIV|MCCR_DCB) +#endif #ifndef MCCR #error Unknown cache type. @@ -37,29 +48,42 @@ void _flush_cache_all(void) { #if defined(CONFIG_CHIP_M32102) + unsigned char mccr; *MCCR = MCCR_ICACHE_INV; +#elif defined(CONFIG_CHIP_M32104) + unsigned short mccr; + + /* Copyback and invalidate D-cache */ + /* Invalidate I-cache */ + *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV); #else unsigned long mccr; /* Copyback and invalidate D-cache */ /* Invalidate I-cache */ *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV; - while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ #endif + while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... 
*/ } /* Copy back D-cache and invalidate I-cache all */ void _flush_cache_copyback_all(void) { #if defined(CONFIG_CHIP_M32102) + unsigned char mccr; *MCCR = MCCR_ICACHE_INV; +#elif defined(CONFIG_CHIP_M32104) + unsigned short mccr; + + /* Copyback and invalidate D-cache */ + /* Invalidate I-cache */ + *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CB); #else unsigned long mccr; /* Copyback D-cache */ /* Invalidate I-cache */ *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB; - while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ - #endif + while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ } diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c index e93a5ad..b2c62ee 100644 --- a/arch/m68knommu/kernel/m68k_ksyms.c +++ b/arch/m68knommu/kernel/m68k_ksyms.c @@ -38,8 +38,6 @@ EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(ip_fast_csum); -EXPORT_SYMBOL(mach_enable_irq); -EXPORT_SYMBOL(mach_disable_irq); EXPORT_SYMBOL(kernel_thread); /* Networking helper routines. */ diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c index abb80fa..93120b9 100644 --- a/arch/m68knommu/kernel/setup.c +++ b/arch/m68knommu/kernel/setup.c @@ -65,8 +65,6 @@ void (*mach_kbd_leds) (unsigned int) = NULL; /* machine dependent irq functions */ void (*mach_init_IRQ) (void) = NULL; irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *) = NULL; -void (*mach_enable_irq) (unsigned int) = NULL; -void (*mach_disable_irq) (unsigned int) = NULL; int (*mach_get_irq_list) (struct seq_file *, void *) = NULL; void (*mach_process_int) (int irq, struct pt_regs *fp) = NULL; void (*mach_trap_init) (void); diff --git a/arch/ppc/boot/simple/Makefile b/arch/ppc/boot/simple/Makefile index f3e9c53..9533f8d 100644 --- a/arch/ppc/boot/simple/Makefile +++ b/arch/ppc/boot/simple/Makefile @@ -190,6 +190,8 @@ boot-$(CONFIG_REDWOOD_5) += embed_config.o boot-$(CONFIG_REDWOOD_6) += embed_config.o boot-$(CONFIG_8xx) += embed_config.o boot-$(CONFIG_8260) += embed_config.o +boot-$(CONFIG_EP405) += embed_config.o +boot-$(CONFIG_XILINX_ML300) += embed_config.o boot-$(CONFIG_BSEIP) += iic.o boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o boot-$(CONFIG_MV64X60) += misc-mv64x60.o diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c index 821a75e..1be3ca5 100644 --- a/arch/ppc/kernel/idle.c +++ b/arch/ppc/kernel/idle.c @@ -37,7 +37,6 @@ void default_idle(void) { void (*powersave)(void); - int cpu = smp_processor_id(); powersave = ppc_md.power_save; @@ -47,7 +46,8 @@ void default_idle(void) #ifdef CONFIG_SMP else { set_thread_flag(TIF_POLLING_NRFLAG); - while (!need_resched() && !cpu_is_offline(cpu)) + while (!need_resched() && + !cpu_is_offline(smp_processor_id())) barrier(); clear_thread_flag(TIF_POLLING_NRFLAG); } diff --git a/arch/ppc/platforms/4xx/ibm440gx.c b/arch/ppc/platforms/4xx/ibm440gx.c index 956f45e..d24c09e 100644 --- a/arch/ppc/platforms/4xx/ibm440gx.c +++ b/arch/ppc/platforms/4xx/ibm440gx.c @@ -58,7 +58,6 @@ static struct ocp_func_emac_data ibm440gx_emac2_def = { .wol_irq = 65, /* WOL interrupt number */ .mdio_idx = -1, /* No shared MDIO */ .tah_idx = 0, /* TAH device index */ - .jumbo = 1, /* Jumbo frames supported */ }; static struct ocp_func_emac_data ibm440gx_emac3_def = { @@ -72,7 +71,6 @@ static struct ocp_func_emac_data ibm440gx_emac3_def = { .wol_irq = 67, /* WOL interrupt number */ .mdio_idx = -1, /* No shared MDIO */ .tah_idx = 1, /* TAH device index */ - .jumbo = 1, /* Jumbo frames supported */ }; OCP_SYSFS_EMAC_DATA() diff --git 
a/arch/ppc/platforms/4xx/ibm440sp.c b/arch/ppc/platforms/4xx/ibm440sp.c index feb17e4..71a0117 100644 --- a/arch/ppc/platforms/4xx/ibm440sp.c +++ b/arch/ppc/platforms/4xx/ibm440sp.c @@ -31,7 +31,6 @@ static struct ocp_func_emac_data ibm440sp_emac0_def = { .wol_irq = 61, /* WOL interrupt number */ .mdio_idx = -1, /* No shared MDIO */ .tah_idx = -1, /* No TAH */ - .jumbo = 1, /* Jumbo frames supported */ }; OCP_SYSFS_EMAC_DATA() diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c index d44cc99..7ed52dc 100644 --- a/arch/ppc/platforms/lite5200.c +++ b/arch/ppc/platforms/lite5200.c @@ -196,8 +196,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, mpc52xx_set_bat(); /* No ISA bus by default */ +#ifdef CONFIG_PCI isa_io_base = 0; isa_mem_base = 0; +#endif /* Powersave */ /* This is provided as an example on how to do it. But you diff --git a/arch/ppc/platforms/mpc5200.c b/arch/ppc/platforms/mpc5200.c deleted file mode 100644 index a58db43..0000000 --- a/arch/ppc/platforms/mpc5200.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * arch/ppc/platforms/mpc5200.c - * - * OCP Definitions for the boards based on MPC5200 processor. Contains - * definitions for every common peripherals. (Mostly all but PSCs) - * - * Maintainer : Sylvain Munaut <tnt@246tNt.com> - * - * Copyright 2004 Sylvain Munaut <tnt@246tNt.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include <asm/ocp.h> -#include <asm/mpc52xx.h> - - -static struct ocp_fs_i2c_data mpc5200_i2c_def = { - .flags = FS_I2C_CLOCK_5200, -}; - - -/* Here is the core_ocp struct. - * With all the devices common to all board. Even if port multiplexing is - * not setup for them (if the user don't want them, just don't select the - * config option). The potentially conflicting devices (like PSCs) goes in - * board specific file. - */ -struct ocp_def core_ocp[] = { - { - .vendor = OCP_VENDOR_FREESCALE, - .function = OCP_FUNC_IIC, - .index = 0, - .paddr = MPC52xx_I2C1, - .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C1 - Buggy */ - .pm = OCP_CPM_NA, - .additions = &mpc5200_i2c_def, - }, - { - .vendor = OCP_VENDOR_FREESCALE, - .function = OCP_FUNC_IIC, - .index = 1, - .paddr = MPC52xx_I2C2, - .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C2 - Buggy */ - .pm = OCP_CPM_NA, - .additions = &mpc5200_i2c_def, - }, - { /* Terminating entry */ - .vendor = OCP_VENDOR_INVALID - } -}; diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c index 4ac1908..313c96e 100644 --- a/arch/ppc/syslib/mpc52xx_pci.c +++ b/arch/ppc/syslib/mpc52xx_pci.c @@ -24,6 +24,12 @@ #include <asm/machdep.h> +/* This macro is defined to activate the workaround for the bug + 435 of the MPC5200 (L25R). 
With it activated, we don't do any + 32 bits configuration access during type-1 cycles */ +#define MPC5200_BUG_435_WORKAROUND + + static int mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) @@ -40,17 +46,39 @@ mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, ((bus->number - hose->bus_offset) << 16) | (devfn << 8) | (offset & 0xfc)); + mb(); + +#ifdef MPC5200_BUG_435_WORKAROUND + if (bus->number != hose->bus_offset) { + switch (len) { + case 1: + value = in_8(((u8 __iomem *)hose->cfg_data) + (offset & 3)); + break; + case 2: + value = in_le16(((u16 __iomem *)hose->cfg_data) + ((offset>>1) & 1)); + break; + + default: + value = in_le16((u16 __iomem *)hose->cfg_data) | + (in_le16(((u16 __iomem *)hose->cfg_data) + 1) << 16); + break; + } + } + else +#endif + { + value = in_le32(hose->cfg_data); - value = in_le32(hose->cfg_data); - - if (len != 4) { - value >>= ((offset & 0x3) << 3); - value &= 0xffffffff >> (32 - (len << 3)); + if (len != 4) { + value >>= ((offset & 0x3) << 3); + value &= 0xffffffff >> (32 - (len << 3)); + } } *val = value; out_be32(hose->cfg_addr, 0); + mb(); return PCIBIOS_SUCCESSFUL; } @@ -71,21 +99,48 @@ mpc52xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, ((bus->number - hose->bus_offset) << 16) | (devfn << 8) | (offset & 0xfc)); + mb(); + +#ifdef MPC5200_BUG_435_WORKAROUND + if (bus->number != hose->bus_offset) { + switch (len) { + case 1: + out_8(((u8 __iomem *)hose->cfg_data) + + (offset & 3), val); + break; + case 2: + out_le16(((u16 __iomem *)hose->cfg_data) + + ((offset>>1) & 1), val); + break; + + default: + out_le16((u16 __iomem *)hose->cfg_data, + (u16)val); + out_le16(((u16 __iomem *)hose->cfg_data) + 1, + (u16)(val>>16)); + break; + } + } + else +#endif + { + if (len != 4) { + value = in_le32(hose->cfg_data); - if (len != 4) { - value = in_le32(hose->cfg_data); + offset = (offset & 0x3) << 3; + mask = (0xffffffff >> (32 - (len << 3))); + mask <<= offset; - offset = (offset & 0x3) << 3; - mask = (0xffffffff >> (32 - (len << 3))); - mask <<= offset; + value &= ~mask; + val = value | ((val << offset) & mask); + } - value &= ~mask; - val = value | ((val << offset) & mask); + out_le32(hose->cfg_data, val); } - - out_le32(hose->cfg_data, val); + mb(); out_be32(hose->cfg_addr, 0); + mb(); return PCIBIOS_SUCCESSFUL; } @@ -99,9 +154,12 @@ static struct pci_ops mpc52xx_pci_ops = { static void __init mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs) { + u32 tmp; /* Setup control regs */ - /* Nothing to do afaik */ + tmp = in_be32(&pci_regs->scr); + tmp |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + out_be32(&pci_regs->scr, tmp); /* Setup windows */ out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION( @@ -142,16 +200,15 @@ mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs) /* Not necessary and can be a bad thing if for example the bootloader is displaying a splash screen or ... 
Just left here for documentation purpose if anyone need it */ -#if 0 - u32 tmp; tmp = in_be32(&pci_regs->gscr); +#if 0 out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR); udelay(50); - out_be32(&pci_regs->gscr, tmp); #endif + out_be32(&pci_regs->gscr, tmp & ~MPC52xx_PCI_GSCR_PR); } -static void __init +static void mpc52xx_pci_fixup_resources(struct pci_dev *dev) { int i; diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c index bb23745..a4a4b02 100644 --- a/arch/ppc/syslib/mpc52xx_setup.c +++ b/arch/ppc/syslib/mpc52xx_setup.c @@ -84,9 +84,11 @@ mpc52xx_set_bat(void) void __init mpc52xx_map_io(void) { - /* Here we only map the MBAR */ + /* Here we map the MBAR and the whole upper zone. MBAR is only + 64k but we can't map only 64k with BATs. Map the whole + 0xf0000000 range is ok and helps eventual lpb devices placed there */ io_block_mapping( - MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO); + MPC52xx_MBAR_VIRT, MPC52xx_MBAR, 0x10000000, _PAGE_IO); } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 477ac27..6fe532d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -23,14 +23,14 @@ config GENERIC_BUST_SPINLOCK mainmenu "Linux Kernel Configuration" -config ARCH_S390 +config S390 bool default y config UID16 bool default y - depends on ARCH_S390X = 'n' + depends on !64BIT source "init/Kconfig" @@ -38,20 +38,12 @@ menu "Base setup" comment "Processor type and features" -config ARCH_S390X +config 64BIT bool "64 bit kernel" help Select this option if you have a 64 bit IBM zSeries machine and want to use the 64 bit addressing mode. -config 64BIT - def_bool ARCH_S390X - -config ARCH_S390_31 - bool - depends on ARCH_S390X = 'n' - default y - config SMP bool "Symmetric multi-processing support" ---help--- @@ -101,20 +93,15 @@ config MATHEMU on older S/390 machines. Say Y unless you know your machine doesn't need this. -config S390_SUPPORT +config COMPAT bool "Kernel support for 31 bit emulation" - depends on ARCH_S390X + depends on 64BIT help Select this option if you want to enable your system kernel to handle system-calls from ELF binaries for 31 bit ESA. This option (and some other stuff like libraries and such) is needed for executing 31 bit applications. It is safe to say "Y". -config COMPAT - bool - depends on S390_SUPPORT - default y - config SYSVIPC_COMPAT bool depends on COMPAT && SYSVIPC @@ -122,7 +109,7 @@ config SYSVIPC_COMPAT config BINFMT_ELF32 tristate "Kernel support for 31 bit ELF binaries" - depends on S390_SUPPORT + depends on COMPAT help This allows you to run 32-bit Linux/ELF binaries on your zSeries in 64 bit mode. Everybody wants this; say Y. @@ -135,7 +122,7 @@ choice config MARCH_G5 bool "S/390 model G5 and G6" - depends on ARCH_S390_31 + depends on !64BIT help Select this to build a 31 bit kernel that works on all S/390 and zSeries machines. @@ -240,8 +227,8 @@ config MACHCHK_WARNING config QDIO tristate "QDIO support" ---help--- - This driver provides the Queued Direct I/O base support for the - IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990). + This driver provides the Queued Direct I/O base support for + IBM mainframes. For details please refer to the documentation provided by IBM at <http://www10.software.ibm.com/developerworks/opensource/linux390> @@ -263,7 +250,8 @@ config QDIO_DEBUG bool "Extended debugging information" depends on QDIO help - Say Y here to get extended debugging output in /proc/s390dbf/qdio... 
+ Say Y here to get extended debugging output in + /sys/kernel/debug/s390dbf/qdio... Warning: this option reduces the performance of the QDIO module. If unsure, say N. diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 73a09a6..6c6b197 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -13,16 +13,14 @@ # Copyright (C) 1994 by Linus Torvalds # -ifdef CONFIG_ARCH_S390_31 +ifndef CONFIG_64BIT LDFLAGS := -m elf_s390 CFLAGS += -m31 AFLAGS += -m31 UTS_MACHINE := s390 STACK_SIZE := 8192 CHECKFLAGS += -D__s390__ -endif - -ifdef CONFIG_ARCH_S390X +else LDFLAGS := -m elf64_s390 MODFLAGS += -fpic -D__PIC__ CFLAGS += -m64 diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index dee6ab5..d06a8d7 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -40,7 +40,7 @@ #define TOD_MICRO 0x01000 /* nr. of TOD clock units for 1 microsecond */ -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT #define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ #define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ @@ -54,13 +54,13 @@ #define APPLDATA_GEN_EVENT_RECORD 0x82 #define APPLDATA_START_CONFIG_REC 0x83 -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* * Parameter list for DIAGNOSE X'DC' */ -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT struct appldata_parameter_list { u16 diag; /* The DIAGNOSE code X'00DC' */ u8 function; /* The function code for the DIAGNOSE */ @@ -82,7 +82,7 @@ struct appldata_parameter_list { u64 product_id_addr; u64 buffer_addr; }; -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* * /proc entries (sysctl) diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index e0a476b..99ddd3b 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -141,19 +141,19 @@ static void appldata_get_os_data(void *data) j = 0; for_each_online_cpu(i) { os_data->os_cpu[j].per_cpu_user = - kstat_cpu(i).cpustat.user; + cputime_to_jiffies(kstat_cpu(i).cpustat.user); os_data->os_cpu[j].per_cpu_nice = - kstat_cpu(i).cpustat.nice; + cputime_to_jiffies(kstat_cpu(i).cpustat.nice); os_data->os_cpu[j].per_cpu_system = - kstat_cpu(i).cpustat.system; + cputime_to_jiffies(kstat_cpu(i).cpustat.system); os_data->os_cpu[j].per_cpu_idle = - kstat_cpu(i).cpustat.idle; + cputime_to_jiffies(kstat_cpu(i).cpustat.idle); os_data->os_cpu[j].per_cpu_irq = - kstat_cpu(i).cpustat.irq; + cputime_to_jiffies(kstat_cpu(i).cpustat.irq); os_data->os_cpu[j].per_cpu_softirq = - kstat_cpu(i).cpustat.softirq; + cputime_to_jiffies(kstat_cpu(i).cpustat.softirq); os_data->os_cpu[j].per_cpu_iowait = - kstat_cpu(i).cpustat.iowait; + cputime_to_jiffies(kstat_cpu(i).cpustat.iowait); j++; } diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 96a05e6..bfe2541 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -2,7 +2,9 @@ # Cryptographic API # -obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o -obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o +obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o +obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o +obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o +obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o -obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o +obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c new file mode 100644 index 0000000..7a1033d --- /dev/null +++ b/arch/s390/crypto/aes_s390.c @@ -0,0 +1,248 @@ +/* + * Cryptographic API. 
+ * + * s390 implementation of the AES Cipher Algorithm. + * + * s390 Version: + * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation + * Author(s): Jan Glauber (jang@de.ibm.com) + * + * Derived from "crypto/aes.c" + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/crypto.h> +#include "crypt_s390.h" + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 + +/* data block size for all key lengths */ +#define AES_BLOCK_SIZE 16 + +int has_aes_128 = 0; +int has_aes_192 = 0; +int has_aes_256 = 0; + +struct s390_aes_ctx { + u8 iv[AES_BLOCK_SIZE]; + u8 key[AES_MAX_KEY_SIZE]; + int key_len; +}; + +static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len, + u32 *flags) +{ + struct s390_aes_ctx *sctx = ctx; + + switch (key_len) { + case 16: + if (!has_aes_128) + goto fail; + break; + case 24: + if (!has_aes_192) + goto fail; + + break; + case 32: + if (!has_aes_256) + goto fail; + break; + default: + /* invalid key length */ + goto fail; + break; + } + + sctx->key_len = key_len; + memcpy(sctx->key, in_key, key_len); + return 0; +fail: + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; +} + +static void aes_encrypt(void *ctx, u8 *out, const u8 *in) +{ + const struct s390_aes_ctx *sctx = ctx; + + switch (sctx->key_len) { + case 16: + crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + case 24: + crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + case 32: + crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + } +} + +static void aes_decrypt(void *ctx, u8 *out, const u8 *in) +{ + const struct s390_aes_ctx *sctx = ctx; + + switch (sctx->key_len) { + case 16: + crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + case 24: + crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + case 32: + crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, + AES_BLOCK_SIZE); + break; + } +} + +static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); + + switch (sctx->key_len) { + case 16: + crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes); + break; + case 24: + crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes); + break; + case 32: + crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes); + break; + } + return nbytes & ~(AES_BLOCK_SIZE - 1); +} + +static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); + + switch (sctx->key_len) { + case 16: + crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes); + break; + case 24: + crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes); + break; + case 32: + crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes); + break; + } + return nbytes & ~(AES_BLOCK_SIZE - 1); +} + +static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); + + memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); + switch (sctx->key_len) { 
+ case 16: + crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes); + break; + case 24: + crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes); + break; + case 32: + crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes); + break; + } + memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE); + + return nbytes & ~(AES_BLOCK_SIZE - 1); +} + +static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, + const u8 *in, unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); + + memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); + switch (sctx->key_len) { + case 16: + crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes); + break; + case 24: + crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes); + break; + case 32: + crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes); + break; + } + return nbytes & ~(AES_BLOCK_SIZE - 1); +} + + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_aes_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + .cia_encrypt_ecb = aes_encrypt_ecb, + .cia_decrypt_ecb = aes_decrypt_ecb, + .cia_encrypt_cbc = aes_encrypt_cbc, + .cia_decrypt_cbc = aes_decrypt_cbc, + } + } +}; + +static int __init aes_init(void) +{ + int ret; + + if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) + has_aes_128 = 1; + if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) + has_aes_192 = 1; + if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) + has_aes_256 = 1; + + if (!has_aes_128 && !has_aes_192 && !has_aes_256) + return -ENOSYS; + + ret = crypto_register_alg(&aes_alg); + if (ret != 0) + printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n"); + return ret; +} + +static void __exit aes_fini(void) +{ + crypto_unregister_alg(&aes_alg); +} + +module_init(aes_init); +module_exit(aes_fini); + +MODULE_ALIAS("aes"); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); +MODULE_LICENSE("GPL"); + diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_s390.h index 4df660b..d1c259a 100644 --- a/arch/s390/crypto/crypt_z990.h +++ b/arch/s390/crypto/crypt_s390.h @@ -1,7 +1,7 @@ /* * Cryptographic API. * - * Support for z990 cryptographic instructions. + * Support for s390 cryptographic instructions. * * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation * Author(s): Thomas Spatzier (tspat@de.ibm.com) @@ -12,84 +12,108 @@ * any later version. 
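[Since aes_s390 registers an ordinary crypto_alg named "aes", in-kernel users reach the hardware through the generic crypto API rather than calling into this module. The following is a hedged sketch of a one-block consumer written against the cipher interface of this kernel generation, not code from the patch; aes_demo() is an illustrative name, error handling is trimmed, and the buffer is assumed not to cross a page boundary.]

    #include <linux/crypto.h>
    #include <linux/mm.h>
    #include <asm/scatterlist.h>

    /* Encrypt one 16-byte block in place with AES-128; "aes" resolves
     * to aes_s390 when the module is loaded and the CPU has KM-AES-128,
     * and to the generic C implementation otherwise. */
    static int aes_demo(const u8 *key, u8 *buf)
    {
            struct crypto_tfm *tfm;
            struct scatterlist sg;
            int ret;

            tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_ECB);
            if (tfm == NULL)
                    return -ENOMEM;

            ret = crypto_cipher_setkey(tfm, key, 16);
            if (ret == 0) {
                    sg.page = virt_to_page(buf);
                    sg.offset = offset_in_page(buf);
                    sg.length = 16;
                    ret = crypto_cipher_encrypt(tfm, &sg, &sg, 16);
            }
            crypto_free_tfm(tfm);
            return ret;
    }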
* */ -#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H -#define _CRYPTO_ARCH_S390_CRYPT_Z990_H +#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H +#define _CRYPTO_ARCH_S390_CRYPT_S390_H #include <asm/errno.h> -#define CRYPT_Z990_OP_MASK 0xFF00 -#define CRYPT_Z990_FUNC_MASK 0x00FF +#define CRYPT_S390_OP_MASK 0xFF00 +#define CRYPT_S390_FUNC_MASK 0x00FF - -/*z990 cryptographic operations*/ -enum crypt_z990_operations { - CRYPT_Z990_KM = 0x0100, - CRYPT_Z990_KMC = 0x0200, - CRYPT_Z990_KIMD = 0x0300, - CRYPT_Z990_KLMD = 0x0400, - CRYPT_Z990_KMAC = 0x0500 +/* s390 cryptographic operations */ +enum crypt_s390_operations { + CRYPT_S390_KM = 0x0100, + CRYPT_S390_KMC = 0x0200, + CRYPT_S390_KIMD = 0x0300, + CRYPT_S390_KLMD = 0x0400, + CRYPT_S390_KMAC = 0x0500 }; -/*function codes for KM (CIPHER MESSAGE) instruction*/ -enum crypt_z990_km_func { - KM_QUERY = CRYPT_Z990_KM | 0, - KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1, - KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher - KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2, - KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80, - KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3, - KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80, +/* function codes for KM (CIPHER MESSAGE) instruction + * 0x80 is the decipher modifier bit + */ +enum crypt_s390_km_func { + KM_QUERY = CRYPT_S390_KM | 0x0, + KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1, + KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80, + KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2, + KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80, + KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3, + KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80, + KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12, + KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80, + KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13, + KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80, + KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14, + KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, }; -/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/ -enum crypt_z990_kmc_func { - KMC_QUERY = CRYPT_Z990_KMC | 0, - KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1, - KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher - KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2, - KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80, - KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3, - KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80, +/* function codes for KMC (CIPHER MESSAGE WITH CHAINING) + * instruction + */ +enum crypt_s390_kmc_func { + KMC_QUERY = CRYPT_S390_KMC | 0x0, + KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1, + KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80, + KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2, + KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80, + KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3, + KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80, + KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12, + KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80, + KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13, + KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, + KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, + KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, }; -/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/ -enum crypt_z990_kimd_func { - KIMD_QUERY = CRYPT_Z990_KIMD | 0, - KIMD_SHA_1 = CRYPT_Z990_KIMD | 1, +/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) + * instruction + */ +enum crypt_s390_kimd_func { + KIMD_QUERY = CRYPT_S390_KIMD | 0, + KIMD_SHA_1 = CRYPT_S390_KIMD | 1, + KIMD_SHA_256 = CRYPT_S390_KIMD | 2, }; -/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/ -enum
crypt_z990_klmd_func { - KLMD_QUERY = CRYPT_Z990_KLMD | 0, - KLMD_SHA_1 = CRYPT_Z990_KLMD | 1, +/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) + * instruction + */ +enum crypt_s390_klmd_func { + KLMD_QUERY = CRYPT_S390_KLMD | 0, + KLMD_SHA_1 = CRYPT_S390_KLMD | 1, + KLMD_SHA_256 = CRYPT_S390_KLMD | 2, }; -/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/ -enum crypt_z990_kmac_func { - KMAC_QUERY = CRYPT_Z990_KMAC | 0, - KMAC_DEA = CRYPT_Z990_KMAC | 1, - KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2, - KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3 +/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction + */ +enum crypt_s390_kmac_func { + KMAC_QUERY = CRYPT_S390_KMAC | 0, + KMAC_DEA = CRYPT_S390_KMAC | 1, + KMAC_TDEA_128 = CRYPT_S390_KMAC | 2, + KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 }; -/*status word for z990 crypto instructions' QUERY functions*/ -struct crypt_z990_query_status { +/* status word for s390 crypto instructions' QUERY functions */ +struct crypt_s390_query_status { u64 high; u64 low; }; /* - * Standard fixup and ex_table sections for crypt_z990 inline functions. - * label 0: the z990 crypto operation - * label 1: just after 1 to catch illegal operation exception on non-z990 + * Standard fixup and ex_table sections for crypt_s390 inline functions. + * label 0: the s390 crypto operation + * label 1: just after 1 to catch illegal operation exception + * (unsupported model) * label 6: the return point after fixup * label 7: set error value if exception _in_ crypto operation * label 8: set error value if illegal operation exception * [ret] is the variable to receive the error code * [ERR] is the error code value */ -#ifndef __s390x__ -#define __crypt_z990_fixup \ +#ifndef CONFIG_64BIT +#define __crypt_s390_fixup \ ".section .fixup,\"ax\" \n" \ "7: lhi %0,%h[e1] \n" \ " bras 1,9f \n" \ @@ -105,8 +129,8 @@ struct crypt_z990_query_status { " .long 0b,7b \n" \ " .long 1b,8b \n" \ ".previous" -#else /* __s390x__ */ -#define __crypt_z990_fixup \ +#else /* CONFIG_64BIT */ +#define __crypt_s390_fixup \ ".section .fixup,\"ax\" \n" \ "7: lhi %0,%h[e1] \n" \ " jg 6b \n" \ @@ -118,25 +142,25 @@ struct crypt_z990_query_status { " .quad 0b,7b \n" \ " .quad 1b,8b \n" \ ".previous" -#endif /* __s390x__ */ +#endif /* CONFIG_64BIT */ /* - * Standard code for setting the result of z990 crypto instructions. + * Standard code for setting the result of s390 crypto instructions. * %0: the register which will receive the result * [result]: the register containing the result (e.g. second operand length * to compute number of processed bytes]. */ -#ifndef __s390x__ -#define __crypt_z990_set_result \ +#ifndef CONFIG_64BIT +#define __crypt_s390_set_result \ " lr %0,%[result] \n" -#else /* __s390x__ */ -#define __crypt_z990_set_result \ +#else /* CONFIG_64BIT */ +#define __crypt_s390_set_result \ " lgr %0,%[result] \n" #endif /* - * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU. - * @param func: the function code passed to KM; see crypt_z990_km_func + * Executes the KM (CIPHER MESSAGE) operation of the CPU. 
+ * @param func: the function code passed to KM; see crypt_s390_km_func * @param param: address of parameter block; see POP for details on each func * @param dest: address of destination memory area * @param src: address of source memory area @@ -145,9 +169,9 @@ struct crypt_z990_query_status { * for encryption/decryption funcs */ static inline int -crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) +crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) { - register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; register void* __param asm("1") = param; register u8* __dest asm("4") = dest; register const u8* __src asm("2") = src; @@ -156,26 +180,26 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) ret = 0; __asm__ __volatile__ ( - "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode - "1: brc 1,0b \n" //handle partial completion - __crypt_z990_set_result + "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */ + "1: brc 1,0b \n" /* handle partial completion */ + __crypt_s390_set_result "6: \n" - __crypt_z990_fixup + __crypt_s390_fixup : "+d" (ret), "+a" (__dest), "+a" (__src), [result] "+d" (__src_len) : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), "a" (__param) : "cc", "memory" ); - if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ + if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ ret = src_len - ret; } return ret; } /* - * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU. - * @param func: the function code passed to KM; see crypt_z990_kmc_func + * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. + * @param func: the function code passed to KM; see crypt_s390_kmc_func * @param param: address of parameter block; see POP for details on each func * @param dest: address of destination memory area * @param src: address of source memory area @@ -184,9 +208,9 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) * for encryption/decryption funcs */ static inline int -crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) +crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) { - register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; register void* __param asm("1") = param; register u8* __dest asm("4") = dest; register const u8* __src asm("2") = src; @@ -195,18 +219,18 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) ret = 0; __asm__ __volatile__ ( - "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode - "1: brc 1,0b \n" //handle partial completion - __crypt_z990_set_result + "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */ + "1: brc 1,0b \n" /* handle partial completion */ + __crypt_s390_set_result "6: \n" - __crypt_z990_fixup + __crypt_s390_fixup : "+d" (ret), "+a" (__dest), "+a" (__src), [result] "+d" (__src_len) : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), "a" (__param) : "cc", "memory" ); - if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ + if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ ret = src_len - ret; } return ret; @@ -214,8 +238,8 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) /* * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation - * of the z990 CPU. - * @param func: the function code passed to KM; see crypt_z990_kimd_func + * of the CPU. 
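[The two wrappers above, like the digest and MAC wrappers that follow, share one calling convention: the function code is loaded into general register 0, the parameter-block address into register 1, and the "brc 1,0b" branch re-drives the instruction while the CPU signals partial completion; the fixup/ex_table labels turn an illegal-operation exception on a machine without the facility into -ENOSYS. On success the wrapper returns the number of bytes processed. A hedged usage sketch, not code from the patch (km_des_one_block() is an illustrative name; key points at the 8-byte DES parameter block, as in des_s390.c further down):]

    #include <linux/types.h>
    #include "crypt_s390.h"

    /* Encrypt one 8-byte block with single DES via the KM wrapper.
     * Returns the bytes processed (8) on success, or a negative
     * errno (-ENOSYS: facility missing, -EFAULT: access fault). */
    static int km_des_one_block(void *key, u8 *dst, const u8 *src)
    {
            return crypt_s390_km(KM_DEA_ENCRYPT, key, dst, src, 8);
    }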
+ * @param func: the function code passed to KM; see crypt_s390_kimd_func + * @param param: address of parameter block; see POP for details on each func + * @param src: address of source memory area + * @param src_len: length of src operand in bytes @@ -223,9 +247,9 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) * for digest funcs */ static inline int -crypt_z990_kimd(long func, void* param, const u8* src, long src_len) +crypt_s390_kimd(long func, void* param, const u8* src, long src_len) { - register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; register void* __param asm("1") = param; register const u8* __src asm("2") = src; register long __src_len asm("3") = src_len; @@ -233,25 +257,25 @@ ret = 0; __asm__ __volatile__ ( - "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode - "1: brc 1,0b \n" /*handle partical completion of kimd*/ - __crypt_z990_set_result + "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */ + "1: brc 1,0b \n" /* handle partial completion */ + __crypt_s390_set_result "6: \n" - __crypt_z990_fixup + __crypt_s390_fixup : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), "a" (__param) : "cc", "memory" ); - if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){ + if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){ ret = src_len - ret; } return ret; } /* - * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU. - * @param func: the function code passed to KM; see crypt_z990_klmd_func + * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. + * @param func: the function code passed to KM; see crypt_s390_klmd_func * @param param: address of parameter block; see POP for details on each func * @param src: address of source memory area * @param src_len: length of src operand in bytes @@ -259,9 +283,9 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len) * for digest funcs */ static inline int -crypt_z990_klmd(long func, void* param, const u8* src, long src_len) +crypt_s390_klmd(long func, void* param, const u8* src, long src_len) { - register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; register void* __param asm("1") = param; register const u8* __src asm("2") = src; register long __src_len asm("3") = src_len; @@ -269,17 +293,17 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len) ret = 0; __asm__ __volatile__ ( - "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode - "1: brc 1,0b \n" /*handle partical completion of klmd*/ - __crypt_z990_set_result + "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */ + "1: brc 1,0b \n" /* handle partial completion */ + __crypt_s390_set_result "6: \n" - __crypt_z990_fixup + __crypt_s390_fixup : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), "a" (__param) : "cc", "memory" ); - if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ + if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ ret = src_len - ret; } return ret; @@ -287,8 +311,8 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len) /* * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation - * of the z990 CPU. - * @param func: the function code passed to KM; see crypt_z990_klmd_func + * of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_klmd_func + * @param param: address of parameter block; see POP for details on each func + * @param src: address of source memory area + * @param src_len: length of src operand in bytes @@ -296,9 +320,9 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len) * for digest funcs */ static inline int -crypt_z990_kmac(long func, void* param, const u8* src, long src_len) +crypt_s390_kmac(long func, void* param, const u8* src, long src_len) { - register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; register void* __param asm("1") = param; register const u8* __src asm("2") = src; register long __src_len asm("3") = src_len; @@ -306,58 +330,58 @@ crypt_z990_kmac(long func, void* param, const u8* src, long src_len) ret = 0; __asm__ __volatile__ ( - "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode - "1: brc 1,0b \n" /*handle partical completion of klmd*/ - __crypt_z990_set_result + "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */ + "1: brc 1,0b \n" /* handle partial completion */ + __crypt_s390_set_result "6: \n" - __crypt_z990_fixup + __crypt_s390_fixup : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), "a" (__param) : "cc", "memory" ); - if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ + if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ ret = src_len - ret; } return ret; } /** - * Tests if a specific z990 crypto function is implemented on the machine. + * Tests if a specific crypto function is implemented on the machine. * @param func: the function code of the specific function; 0 if op in general * @return 1 if func available; 0 if func or op in general not available */ static inline int -crypt_z990_func_available(int func) +crypt_s390_func_available(int func) { int ret; - struct crypt_z990_query_status status = { + struct crypt_s390_query_status status = { .high = 0, .low = 0 }; - switch (func & CRYPT_Z990_OP_MASK){ - case CRYPT_Z990_KM: - ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); + switch (func & CRYPT_S390_OP_MASK){ + case CRYPT_S390_KM: + ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); break; - case CRYPT_Z990_KMC: - ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); + case CRYPT_S390_KMC: + ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); break; - case CRYPT_Z990_KIMD: - ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); + case CRYPT_S390_KIMD: + ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); break; - case CRYPT_Z990_KLMD: - ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); + case CRYPT_S390_KLMD: + ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); break; - case CRYPT_Z990_KMAC: - ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); + case CRYPT_S390_KMAC: + ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); break; default: ret = 0; return ret; } if (ret >= 0){ - func &= CRYPT_Z990_FUNC_MASK; + func &= CRYPT_S390_FUNC_MASK; func &= 0x7f; //mask modifier bit if (func < 64){ ret = (status.high >> (64 - func - 1)) & 0x1; @@ -370,5 +394,4 @@ crypt_z990_func_available(int func) return ret; } - -#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H +#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c new file mode 100644 index 0000000..def02bd --- /dev/null +++ b/arch/s390/crypto/crypt_s390_query.c @@ -0,0 +1,129 @@ +/* + * Cryptographic API.
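[crypt_s390_func_available() above decodes the 16-byte status block returned by the QUERY functions: bits are numbered from the left, so function code n is reported in bit n, and the 0x80 decipher modifier is masked off before the lookup. A standalone model of the high-word case visible in the hunk (the low-word branch is elided there); func_bit_set() is an illustrative name:]

    #include <stdint.h>

    /* 1 if function code fc (modifier bit already cleared) is
     * flagged in the high 64 bits of the QUERY status block;
     * fc >= 64 lives in the low word, which the hunk above
     * does not show. */
    static int func_bit_set(uint64_t high, unsigned int fc)
    {
            if (fc >= 64)
                    return 0;
            return (int)((high >> (64 - fc - 1)) & 1);
    }

[For example, KM_AES_128_ENCRYPT carries function code 0x12 (18), so its availability is bit 18 counted from the most significant bit: (high >> 45) & 1.]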
+ * + * Support for s390 cryptographic instructions. + * Testing module for querying processor crypto capabilities. + * + * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Thomas Spatzier (tspat@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/errno.h> +#include "crypt_s390.h" + +static void query_available_functions(void) +{ + printk(KERN_INFO "#####################\n"); + + /* query available KM functions */ + printk(KERN_INFO "KM_QUERY: %d\n", + crypt_s390_func_available(KM_QUERY)); + printk(KERN_INFO "KM_DEA: %d\n", + crypt_s390_func_available(KM_DEA_ENCRYPT)); + printk(KERN_INFO "KM_TDEA_128: %d\n", + crypt_s390_func_available(KM_TDEA_128_ENCRYPT)); + printk(KERN_INFO "KM_TDEA_192: %d\n", + crypt_s390_func_available(KM_TDEA_192_ENCRYPT)); + printk(KERN_INFO "KM_AES_128: %d\n", + crypt_s390_func_available(KM_AES_128_ENCRYPT)); + printk(KERN_INFO "KM_AES_192: %d\n", + crypt_s390_func_available(KM_AES_192_ENCRYPT)); + printk(KERN_INFO "KM_AES_256: %d\n", + crypt_s390_func_available(KM_AES_256_ENCRYPT)); + + /* query available KMC functions */ + printk(KERN_INFO "KMC_QUERY: %d\n", + crypt_s390_func_available(KMC_QUERY)); + printk(KERN_INFO "KMC_DEA: %d\n", + crypt_s390_func_available(KMC_DEA_ENCRYPT)); + printk(KERN_INFO "KMC_TDEA_128: %d\n", + crypt_s390_func_available(KMC_TDEA_128_ENCRYPT)); + printk(KERN_INFO "KMC_TDEA_192: %d\n", + crypt_s390_func_available(KMC_TDEA_192_ENCRYPT)); + printk(KERN_INFO "KMC_AES_128: %d\n", + crypt_s390_func_available(KMC_AES_128_ENCRYPT)); + printk(KERN_INFO "KMC_AES_192: %d\n", + crypt_s390_func_available(KMC_AES_192_ENCRYPT)); + printk(KERN_INFO "KMC_AES_256: %d\n", + crypt_s390_func_available(KMC_AES_256_ENCRYPT)); + + /* query available KIMD functions */ + printk(KERN_INFO "KIMD_QUERY: %d\n", + crypt_s390_func_available(KIMD_QUERY)); + printk(KERN_INFO "KIMD_SHA_1: %d\n", + crypt_s390_func_available(KIMD_SHA_1)); + printk(KERN_INFO "KIMD_SHA_256: %d\n", + crypt_s390_func_available(KIMD_SHA_256)); + + /* query available KLMD functions */ + printk(KERN_INFO "KLMD_QUERY: %d\n", + crypt_s390_func_available(KLMD_QUERY)); + printk(KERN_INFO "KLMD_SHA_1: %d\n", + crypt_s390_func_available(KLMD_SHA_1)); + printk(KERN_INFO "KLMD_SHA_256: %d\n", + crypt_s390_func_available(KLMD_SHA_256)); + + /* query available KMAC functions */ + printk(KERN_INFO "KMAC_QUERY: %d\n", + crypt_s390_func_available(KMAC_QUERY)); + printk(KERN_INFO "KMAC_DEA: %d\n", + crypt_s390_func_available(KMAC_DEA)); + printk(KERN_INFO "KMAC_TDEA_128: %d\n", + crypt_s390_func_available(KMAC_TDEA_128)); + printk(KERN_INFO "KMAC_TDEA_192: %d\n", + crypt_s390_func_available(KMAC_TDEA_192)); +} + +static int init(void) +{ + struct crypt_s390_query_status status = { + .high = 0, + .low = 0 + }; + + printk(KERN_INFO "crypt_s390: querying available crypto functions\n"); + crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); + printk(KERN_INFO "KM:\t%016llx %016llx\n", + (unsigned long long) status.high, + (unsigned long long) status.low); + status.high = status.low = 0; + crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); + printk(KERN_INFO "KMC:\t%016llx %016llx\n", + (unsigned long long) status.high, + (unsigned long long) status.low); + 
status.high = status.low = 0; + crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); + printk(KERN_INFO "KIMD:\t%016llx %016llx\n", + (unsigned long long) status.high, + (unsigned long long) status.low); + status.high = status.low = 0; + crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); + printk(KERN_INFO "KLMD:\t%016llx %016llx\n", + (unsigned long long) status.high, + (unsigned long long) status.low); + status.high = status.low = 0; + crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); + printk(KERN_INFO "KMAC:\t%016llx %016llx\n", + (unsigned long long) status.high, + (unsigned long long) status.low); + + query_available_functions(); + return -ECANCELED; +} + +static void __exit cleanup(void) +{ +} + +module_init(init); +module_exit(cleanup); + +MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c deleted file mode 100644 index 7133983..0000000 --- a/arch/s390/crypto/crypt_z990_query.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Cryptographic API. - * - * Support for z990 cryptographic instructions. - * Testing module for querying processor crypto capabilities. - * - * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Thomas Spatzier (tspat@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <asm/errno.h> -#include "crypt_z990.h" - -static void -query_available_functions(void) -{ - printk(KERN_INFO "#####################\n"); - //query available KM functions - printk(KERN_INFO "KM_QUERY: %d\n", - crypt_z990_func_available(KM_QUERY)); - printk(KERN_INFO "KM_DEA: %d\n", - crypt_z990_func_available(KM_DEA_ENCRYPT)); - printk(KERN_INFO "KM_TDEA_128: %d\n", - crypt_z990_func_available(KM_TDEA_128_ENCRYPT)); - printk(KERN_INFO "KM_TDEA_192: %d\n", - crypt_z990_func_available(KM_TDEA_192_ENCRYPT)); - //query available KMC functions - printk(KERN_INFO "KMC_QUERY: %d\n", - crypt_z990_func_available(KMC_QUERY)); - printk(KERN_INFO "KMC_DEA: %d\n", - crypt_z990_func_available(KMC_DEA_ENCRYPT)); - printk(KERN_INFO "KMC_TDEA_128: %d\n", - crypt_z990_func_available(KMC_TDEA_128_ENCRYPT)); - printk(KERN_INFO "KMC_TDEA_192: %d\n", - crypt_z990_func_available(KMC_TDEA_192_ENCRYPT)); - //query available KIMD fucntions - printk(KERN_INFO "KIMD_QUERY: %d\n", - crypt_z990_func_available(KIMD_QUERY)); - printk(KERN_INFO "KIMD_SHA_1: %d\n", - crypt_z990_func_available(KIMD_SHA_1)); - //query available KLMD functions - printk(KERN_INFO "KLMD_QUERY: %d\n", - crypt_z990_func_available(KLMD_QUERY)); - printk(KERN_INFO "KLMD_SHA_1: %d\n", - crypt_z990_func_available(KLMD_SHA_1)); - //query available KMAC functions - printk(KERN_INFO "KMAC_QUERY: %d\n", - crypt_z990_func_available(KMAC_QUERY)); - printk(KERN_INFO "KMAC_DEA: %d\n", - crypt_z990_func_available(KMAC_DEA)); - printk(KERN_INFO "KMAC_TDEA_128: %d\n", - crypt_z990_func_available(KMAC_TDEA_128)); - printk(KERN_INFO "KMAC_TDEA_192: %d\n", - crypt_z990_func_available(KMAC_TDEA_192)); -} - -static int -init(void) -{ - struct crypt_z990_query_status status = { - .high = 0, - .low = 0 - }; - - printk(KERN_INFO "crypt_z990: querying available crypto functions\n"); - crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); - printk(KERN_INFO "KM: %016llx %016llx\n", - (unsigned long 
long) status.high, - (unsigned long long) status.low); - status.high = status.low = 0; - crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); - printk(KERN_INFO "KMC: %016llx %016llx\n", - (unsigned long long) status.high, - (unsigned long long) status.low); - status.high = status.low = 0; - crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); - printk(KERN_INFO "KIMD: %016llx %016llx\n", - (unsigned long long) status.high, - (unsigned long long) status.low); - status.high = status.low = 0; - crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); - printk(KERN_INFO "KLMD: %016llx %016llx\n", - (unsigned long long) status.high, - (unsigned long long) status.low); - status.high = status.low = 0; - crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); - printk(KERN_INFO "KMAC: %016llx %016llx\n", - (unsigned long long) status.high, - (unsigned long long) status.low); - - query_available_functions(); - return -1; -} - -static void __exit -cleanup(void) -{ -} - -module_init(init); -module_exit(cleanup); - -MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_s390.c index 813cf37..a38bb2a 100644 --- a/arch/s390/crypto/des_z990.c +++ b/arch/s390/crypto/des_s390.c @@ -1,7 +1,7 @@ /* * Cryptographic API. * - * z990 implementation of the DES Cipher Algorithm. + * s390 implementation of the DES Cipher Algorithm. * * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Thomas Spatzier (tspat@de.ibm.com) @@ -19,7 +19,7 @@ #include <linux/errno.h> #include <asm/scatterlist.h> #include <linux/crypto.h> -#include "crypt_z990.h" +#include "crypt_s390.h" #include "crypto_des.h" #define DES_BLOCK_SIZE 8 @@ -31,17 +31,17 @@ #define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE) #define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE -struct crypt_z990_des_ctx { +struct crypt_s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; u8 key[DES_KEY_SIZE]; }; -struct crypt_z990_des3_128_ctx { +struct crypt_s390_des3_128_ctx { u8 iv[DES_BLOCK_SIZE]; u8 key[DES3_128_KEY_SIZE]; }; -struct crypt_z990_des3_192_ctx { +struct crypt_s390_des3_192_ctx { u8 iv[DES_BLOCK_SIZE]; u8 key[DES3_192_KEY_SIZE]; }; @@ -49,7 +49,7 @@ struct crypt_z990_des3_192_ctx { static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) { - struct crypt_z990_des_ctx *dctx; + struct crypt_s390_des_ctx *dctx; int ret; dctx = ctx; @@ -65,26 +65,26 @@ des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) static void des_encrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des_ctx *dctx; + struct crypt_s390_des_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); + crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); } static void des_decrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des_ctx *dctx; + struct crypt_s390_des_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); + crypt_s390_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE); } static struct crypto_alg des_alg = { .cra_name = "des", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_z990_des_ctx), + .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(des_alg.cra_list), .cra_u = { .cipher = { @@ -111,7 +111,7 @@ static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) { int i, ret; - struct crypt_z990_des3_128_ctx *dctx; + struct crypt_s390_des3_128_ctx *dctx; const u8* temp_key = key; dctx = 
ctx; @@ -132,20 +132,20 @@ des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des3_128_ctx *dctx; + struct crypt_s390_des3_128_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, + crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, DES3_128_BLOCK_SIZE); } static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des3_128_ctx *dctx; + struct crypt_s390_des3_128_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, + crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, DES3_128_BLOCK_SIZE); } @@ -153,7 +153,7 @@ static struct crypto_alg des3_128_alg = { .cra_name = "des3_ede128", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_128_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_z990_des3_128_ctx), + .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list), .cra_u = { .cipher = { @@ -181,7 +181,7 @@ static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) { int i, ret; - struct crypt_z990_des3_192_ctx *dctx; + struct crypt_s390_des3_192_ctx *dctx; const u8* temp_key; dctx = ctx; @@ -206,20 +206,20 @@ des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des3_192_ctx *dctx; + struct crypt_s390_des3_192_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, + crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, DES3_192_BLOCK_SIZE); } static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) { - struct crypt_z990_des3_192_ctx *dctx; + struct crypt_s390_des3_192_ctx *dctx; dctx = ctx; - crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, + crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, DES3_192_BLOCK_SIZE); } @@ -227,7 +227,7 @@ static struct crypto_alg des3_192_alg = { .cra_name = "des3_ede", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_192_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_z990_des3_192_ctx), + .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list), .cra_u = { .cipher = { @@ -245,9 +245,9 @@ init(void) { int ret; - if (!crypt_z990_func_available(KM_DEA_ENCRYPT) || - !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) || - !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){ + if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || + !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || + !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)){ return -ENOSYS; } @@ -262,7 +262,7 @@ init(void) return -EEXIST; } - printk(KERN_INFO "crypt_z990: des_z990 loaded.\n"); + printk(KERN_INFO "crypt_s390: des_s390 loaded.\n"); return 0; } diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_s390.c index 298174d..98c896b 100644 --- a/arch/s390/crypto/sha1_z990.c +++ b/arch/s390/crypto/sha1_s390.c @@ -1,7 +1,7 @@ /* * Cryptographic API. * - * z990 implementation of the SHA1 Secure Hash Algorithm. + * s390 implementation of the SHA1 Secure Hash Algorithm. * * Derived from cryptoapi implementation, adapted for in-place * scatterlist interface. 
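[des_s390, like the other modules in this directory, follows a probe-then-register init pattern: test the needed KM function codes, return -ENOSYS when the machine lacks them, and unregister earlier algorithms if a later registration fails. A condensed, illustrative sketch of that shape (the real init registers des, des3_ede128 and des3_ede, and reports -EEXIST on registration failure):]

    static int __init example_init(void)
    {
            int ret;

            if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
                !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
                    return -ENOSYS;         /* no hardware support */

            ret = crypto_register_alg(&des_alg);
            if (ret)
                    return ret;

            ret = crypto_register_alg(&des3_192_alg);
            if (ret)
                    crypto_unregister_alg(&des_alg);    /* unwind */
            return ret;
    }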
Originally based on the public domain @@ -28,22 +28,22 @@ #include <linux/crypto.h> #include <asm/scatterlist.h> #include <asm/byteorder.h> -#include "crypt_z990.h" +#include "crypt_s390.h" #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 -struct crypt_z990_sha1_ctx { - u64 count; - u32 state[5]; +struct crypt_s390_sha1_ctx { + u64 count; + u32 state[5]; u32 buf_len; - u8 buffer[2 * SHA1_BLOCK_SIZE]; + u8 buffer[2 * SHA1_BLOCK_SIZE]; }; static void sha1_init(void *ctx) { - static const struct crypt_z990_sha1_ctx initstate = { + static const struct crypt_s390_sha1_ctx initstate = { .state = { 0x67452301, 0xEFCDAB89, @@ -58,7 +58,7 @@ sha1_init(void *ctx) static void sha1_update(void *ctx, const u8 *data, unsigned int len) { - struct crypt_z990_sha1_ctx *sctx; + struct crypt_s390_sha1_ctx *sctx; long imd_len; sctx = ctx; @@ -69,7 +69,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len) //complete full block and hash memcpy(sctx->buffer + sctx->buf_len, data, SHA1_BLOCK_SIZE - sctx->buf_len); - crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, + crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, SHA1_BLOCK_SIZE); data += SHA1_BLOCK_SIZE - sctx->buf_len; len -= SHA1_BLOCK_SIZE - sctx->buf_len; @@ -79,7 +79,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len) //rest of data contains full blocks? imd_len = len & ~0x3ful; if (imd_len){ - crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len); + crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); data += imd_len; len -= imd_len; } @@ -92,7 +92,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len) static void -pad_message(struct crypt_z990_sha1_ctx* sctx) +pad_message(struct crypt_s390_sha1_ctx* sctx) { int index; @@ -113,11 +113,11 @@ pad_message(struct crypt_z990_sha1_ctx* sctx) static void sha1_final(void* ctx, u8 *out) { - struct crypt_z990_sha1_ctx *sctx = ctx; + struct crypt_s390_sha1_ctx *sctx = ctx; //must perform manual padding pad_message(sctx); - crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); + crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); //copy digest to out memcpy(out, sctx->state, SHA1_DIGEST_SIZE); /* Wipe context */ @@ -128,7 +128,7 @@ static struct crypto_alg alg = { .cra_name = "sha1", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_z990_sha1_ctx), + .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { @@ -143,10 +143,10 @@ init(void) { int ret = -ENOSYS; - if (crypt_z990_func_available(KIMD_SHA_1)){ + if (crypt_s390_func_available(KIMD_SHA_1)){ ret = crypto_register_alg(&alg); if (ret == 0){ - printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n"); + printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n"); } } return ret; diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c new file mode 100644 index 0000000..b75bdbd --- /dev/null +++ b/arch/s390/crypto/sha256_s390.c @@ -0,0 +1,151 @@ +/* + * Cryptographic API. + * + * s390 implementation of the SHA256 Secure Hash Algorithm. 
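[sha1_update() above batches work for KIMD, which digests only whole 64-byte blocks: leftover bytes stay in the context buffer, and the largest block-aligned prefix of the new data is handed to the instruction in a single call. The rounding is one mask; full_block_bytes() below is an illustrative name for it:]

    /* Round a byte count down to a multiple of the 64-byte SHA-1
     * block size, as "imd_len = len & ~0x3ful" does above.
     * e.g. 200 -> 192, 63 -> 0. */
    static unsigned long full_block_bytes(unsigned long len)
    {
            return len & ~0x3ful;
    }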
+ * + * s390 Version: + * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation + * Author(s): Jan Glauber (jang@de.ibm.com) + * + * Derived from "crypto/sha256.c" + * and "arch/s390/crypto/sha1_s390.c" + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/crypto.h> + +#include "crypt_s390.h" + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +struct s390_sha256_ctx { + u64 count; + u32 state[8]; + u8 buf[2 * SHA256_BLOCK_SIZE]; +}; + +static void sha256_init(void *ctx) +{ + struct s390_sha256_ctx *sctx = ctx; + + sctx->state[0] = 0x6a09e667; + sctx->state[1] = 0xbb67ae85; + sctx->state[2] = 0x3c6ef372; + sctx->state[3] = 0xa54ff53a; + sctx->state[4] = 0x510e527f; + sctx->state[5] = 0x9b05688c; + sctx->state[6] = 0x1f83d9ab; + sctx->state[7] = 0x5be0cd19; + sctx->count = 0; + memset(sctx->buf, 0, sizeof(sctx->buf)); +} + +static void sha256_update(void *ctx, const u8 *data, unsigned int len) +{ + struct s390_sha256_ctx *sctx = ctx; + unsigned int index; + + /* how much is already in the buffer? */ + index = sctx->count / 8 & 0x3f; + + /* update message bit length */ + sctx->count += len * 8; + + /* process one block */ + if ((index + len) >= SHA256_BLOCK_SIZE) { + memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index); + crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, + SHA256_BLOCK_SIZE); + data += SHA256_BLOCK_SIZE - index; + len -= SHA256_BLOCK_SIZE - index; + } + + /* anything left? */ + if (len) + memcpy(sctx->buf + index , data, len); +} + +static void pad_message(struct s390_sha256_ctx* sctx) +{ + int index, end; + + index = sctx->count / 8 & 0x3f; + end = index < 56 ? 
SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; + + /* start pad with 1 */ + sctx->buf[index] = 0x80; + + /* pad with zeros */ + index++; + memset(sctx->buf + index, 0x00, end - index - 8); + + /* append message length */ + memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); + + sctx->count = end * 8; +} + +/* Add padding and return the message digest */ +static void sha256_final(void* ctx, u8 *out) +{ + struct s390_sha256_ctx *sctx = ctx; + + /* must perform manual padding */ + pad_message(sctx); + + crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, + sctx->count / 8); + + /* copy digest to out */ + memcpy(out, sctx->state, SHA256_DIGEST_SIZE); + + /* wipe context */ + memset(sctx, 0, sizeof *sctx); +} + +static struct crypto_alg alg = { + .cra_name = "sha256", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_sha256_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = SHA256_DIGEST_SIZE, + .dia_init = sha256_init, + .dia_update = sha256_update, + .dia_final = sha256_final } } +}; + +static int init(void) +{ + int ret; + + if (!crypt_s390_func_available(KIMD_SHA_256)) + return -ENOSYS; + + ret = crypto_register_alg(&alg); + if (ret != 0) + printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded."); + return ret; +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_ALIAS("sha256"); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 45d44c6..7d23edc 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,12 +1,12 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.14-rc1 -# Wed Sep 14 16:46:19 2005 +# Linux kernel version: 2.6.15-rc2 +# Mon Nov 21 13:51:30 2005 # CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_S390=y +CONFIG_S390=y CONFIG_UID16=y # @@ -65,15 +65,31 @@ CONFIG_KMOD=y CONFIG_STOP_MACHINE=y # +# Block layer +# +# CONFIG_LBD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="anticipatory" + +# # Base setup # # # Processor type and features # -# CONFIG_ARCH_S390X is not set # CONFIG_64BIT is not set -CONFIG_ARCH_S390_31=y CONFIG_SMP=y CONFIG_NR_CPUS=32 CONFIG_HOTPLUG_CPU=y @@ -97,6 +113,7 @@ CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y # CONFIG_SPARSEMEM_STATIC is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 # # I/O subsystem configuration @@ -188,10 +205,18 @@ CONFIG_IPV6=y # CONFIG_NET_DIVERT is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set + +# +# QoS and/or fair queueing +# CONFIG_NET_SCHED=y CONFIG_NET_SCH_CLK_JIFFIES=y # CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set # CONFIG_NET_SCH_CLK_CPU is not set + +# +# Queueing/Scheduling +# CONFIG_NET_SCH_CBQ=m # CONFIG_NET_SCH_HTB is not set # CONFIG_NET_SCH_HFSC is not set @@ -204,8 +229,10 @@ CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_DSMARK=m # CONFIG_NET_SCH_NETEM is not set # CONFIG_NET_SCH_INGRESS is not set -CONFIG_NET_QOS=y -CONFIG_NET_ESTIMATOR=y + +# +# Classification +# CONFIG_NET_CLS=y # CONFIG_NET_CLS_BASIC is not set CONFIG_NET_CLS_TCINDEX=m @@ -214,18 +241,18 @@ 
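/*
 * Aside, not part of the patch: a minimal userspace sketch of the
 * MD-strengthening padding that pad_message() in sha256_s390.c above
 * implements. Assumed here: 64-byte blocks, the running bit count kept
 * in a u64, and the length appended in CPU byte order exactly as the
 * s390 code does (big-endian on that machine). The names pad_sketch
 * and BLOCK_SIZE are hypothetical, not kernel identifiers.
 */
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 64

/* buf must hold 2 * BLOCK_SIZE bytes; count is the message length in bits.
 * Returns how many padded bytes the final hashing step must consume. */
static unsigned int pad_sketch(uint8_t *buf, uint64_t count)
{
	unsigned int index = (count / 8) & 0x3f;     /* bytes in the last block */
	unsigned int end = index < 56 ? BLOCK_SIZE : 2 * BLOCK_SIZE;

	buf[index++] = 0x80;                         /* one 1 bit, then zeros */
	memset(buf + index, 0x00, end - index - 8);
	memcpy(buf + end - 8, &count, sizeof count); /* append the bit length */
	return end;
}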
CONFIG_NET_CLS_ROUTE=y CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m # CONFIG_CLS_U32_PERF is not set -# CONFIG_NET_CLS_IND is not set CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m # CONFIG_NET_EMATCH is not set # CONFIG_NET_CLS_ACT is not set CONFIG_NET_CLS_POLICE=y +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_ESTIMATOR=y # # Network testing # # CONFIG_NET_PKTGEN is not set -# CONFIG_NETFILTER_NETLINK is not set # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set @@ -276,6 +303,7 @@ CONFIG_SCSI_FC_ATTRS=y # # SCSI low-level drivers # +# CONFIG_ISCSI_TCP is not set # CONFIG_SCSI_SATA is not set # CONFIG_SCSI_DEBUG is not set CONFIG_ZFCP=y @@ -292,7 +320,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 CONFIG_BLK_DEV_INITRD=y -# CONFIG_LBD is not set # CONFIG_CDROM_PKTCDVD is not set # @@ -305,15 +332,8 @@ CONFIG_DASD_PROFILE=y CONFIG_DASD_ECKD=y CONFIG_DASD_FBA=y CONFIG_DASD_DIAG=y +CONFIG_DASD_EER=m # CONFIG_DASD_CMB is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y # CONFIG_ATA_OVER_ETH is not set # @@ -378,7 +398,6 @@ CONFIG_S390_TAPE_34XX=m # CONFIG_VMLOGRDR is not set # CONFIG_VMCP is not set # CONFIG_MONREADER is not set -# CONFIG_DCSS_SHM is not set # # Cryptographic devices @@ -593,6 +612,8 @@ CONFIG_DEBUG_PREEMPT=y # CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_INFO is not set CONFIG_DEBUG_FS=y +# CONFIG_DEBUG_VM is not set +# CONFIG_RCU_TORTURE_TEST is not set # # Security options @@ -609,17 +630,19 @@ CONFIG_CRYPTO=y # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA1_Z990 is not set +# CONFIG_CRYPTO_SHA1_S390 is not set # CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA256_S390 is not set # CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_TGR192 is not set # CONFIG_CRYPTO_DES is not set -# CONFIG_CRYPTO_DES_Z990 is not set +# CONFIG_CRYPTO_DES_S390 is not set # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_TWOFISH is not set # CONFIG_CRYPTO_SERPENT is not set # CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_AES_S390 is not set # CONFIG_CRYPTO_CAST5 is not set # CONFIG_CRYPTO_CAST6 is not set # CONFIG_CRYPTO_TEA is not set diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 7434c32..4865e4b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -8,31 +8,26 @@ obj-y := bitmap.o traps.o time.o process.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o +obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) +obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) + extra-y += head.o init_task.o vmlinux.lds obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_S390_SUPPORT) += compat_linux.o compat_signal.o \ +obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ compat_ioctl.o compat_wrapper.o \ compat_exec_domain.o obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o -obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o -obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o - obj-$(CONFIG_VIRT_TIMER) += vtime.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o -ifeq ($(CONFIG_ARCH_S390X),y) -S390_KEXEC_OBJS += relocate_kernel64.o -else -S390_KEXEC_OBJS += relocate_kernel.o -endif +S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) - # # This is just to get the 
dependencies... # diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index ed877d0..41b197a 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -279,7 +279,7 @@ asmlinkage long sys32_getegid16(void) static inline long get_tv32(struct timeval *o, struct compat_timeval *i) { - return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) || + return (!access_ok(VERIFY_READ, o, sizeof(*o)) || (__get_user(o->tv_sec, &i->tv_sec) || __get_user(o->tv_usec, &i->tv_usec))); } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 4ff6808..fa2b3bc 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -467,8 +467,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) if (err) goto badframe; - /* It is more difficult to avoid calling this function than to - call it and ignore errors. */ set_fs (KERNEL_DS); do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); set_fs (old_fs); diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index d47fecb..4ef44e5 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -39,7 +39,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) if (response != NULL && rlen > 0) { memset(response, 0, rlen); -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lra 2,0(%2)\n" "lr 4,%3\n" "o 4,%6\n" @@ -55,7 +55,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) : "a" (cpcmd_buf), "d" (cmdlen), "a" (response), "d" (rlen), "m" (mask) : "cc", "2", "3", "4", "5" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lrag 2,0(%2)\n" "lgr 4,%3\n" "o 4,%6\n" @@ -73,11 +73,11 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) : "a" (cpcmd_buf), "d" (cmdlen), "a" (response), "d" (rlen), "m" (mask) : "cc", "2", "3", "4", "5" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ EBCASC(response, rlen); } else { return_len = 0; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lra 2,0(%1)\n" "lr 3,%2\n" "diag 2,3,0x8\n" @@ -85,7 +85,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) : "=d" (return_code) : "a" (cpcmd_buf), "d" (cmdlen) : "2", "3" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lrag 2,0(%1)\n" "lgr 3,%2\n" "sam31\n" @@ -95,7 +95,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) : "=d" (return_code) : "a" (cpcmd_buf), "d" (cmdlen) : "2", "3" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ } spin_unlock_irqrestore(&cpcmd_lock, flags); if (response_code != NULL) @@ -105,7 +105,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) EXPORT_SYMBOL(__cpcmd); -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT int cpcmd(const char *cmd, char *response, int rlen, int *response_code) { char *lowbuf; @@ -129,4 +129,4 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) } EXPORT_SYMBOL(cpcmd); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 4eb71ff..369ab44 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -213,7 +213,7 @@ sysc_nr_ok: mvc SP_ARGS(8,%r15),SP_R7(%r15) sysc_do_restart: larl %r10,sys_call_table -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? 
jno sysc_noemu larl %r10,sys_call_table_emu # use 31 bit emulation system calls @@ -361,7 +361,7 @@ sys_clone_glue: la %r2,SP_PTREGS(%r15) # load pt_regs jg sys_clone # branch to sys_clone -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_clone_glue: la %r2,SP_PTREGS(%r15) # load pt_regs jg sys32_clone # branch to sys32_clone @@ -383,7 +383,7 @@ sys_execve_glue: bnz 0(%r12) # it did fail -> store result in gpr2 b 6(%r12) # SKIP STG 2,SP_R2(15) in # system_call/sysc_tracesys -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_execve_glue: la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r12,%r14 # save return address @@ -398,7 +398,7 @@ sys_sigreturn_glue: la %r2,SP_PTREGS(%r15) # load pt_regs as parameter jg sys_sigreturn # branch to sys_sigreturn -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_sigreturn_glue: la %r2,SP_PTREGS(%r15) # load pt_regs as parameter jg sys32_sigreturn # branch to sys32_sigreturn @@ -408,7 +408,7 @@ sys_rt_sigreturn_glue: la %r2,SP_PTREGS(%r15) # load pt_regs as parameter jg sys_rt_sigreturn # branch to sys_sigreturn -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_rt_sigreturn_glue: la %r2,SP_PTREGS(%r15) # load pt_regs as parameter jg sys32_rt_sigreturn # branch to sys32_sigreturn @@ -429,7 +429,7 @@ sys_sigsuspend_glue: la %r14,6(%r14) # skip store of return value jg sys_sigsuspend # branch to sys_sigsuspend -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_sigsuspend_glue: llgfr %r4,%r4 # unsigned long lgr %r5,%r4 # move mask back @@ -449,7 +449,7 @@ sys_rt_sigsuspend_glue: la %r14,6(%r14) # skip store of return value jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_rt_sigsuspend_glue: llgfr %r3,%r3 # size_t lgr %r4,%r3 # move sigsetsize parameter @@ -464,7 +464,7 @@ sys_sigaltstack_glue: la %r4,SP_PTREGS(%r15) # load pt_regs as parameter jg sys_sigaltstack # branch to sys_sigreturn -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT sys32_sigaltstack_glue: la %r4,SP_PTREGS(%r15) # load pt_regs as parameter jg sys32_sigaltstack_wrapper # branch to sys_sigreturn @@ -1009,7 +1009,7 @@ sys_call_table: #include "syscalls.S" #undef SYSCALL -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT #define SYSCALL(esa,esame,emu) .long emu .globl sys_call_table_emu diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index d31a97c..ea88d06 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -30,7 +30,7 @@ #include <asm/thread_info.h> #include <asm/page.h> -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT #define ARCH_OFFSET 4 #else #define ARCH_OFFSET 0 @@ -539,7 +539,7 @@ ipl_devno: .word 0 .endm -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT #include "head64.S" #else #include "head31.S" diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 5aa71b0..f0ed5c6 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image) pfault_fini(); #endif - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) signal_processor(smp_processor_id(), sigp_stop); /* Wait for all other cpus to enter stopped state */ diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 607d506..c271cda 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -37,11 +37,11 @@ #define DEBUGP(fmt , ...) 
#endif -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT #define PLT_ENTRY_SIZE 12 -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define PLT_ENTRY_SIZE 20 -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ void *module_alloc(unsigned long size) { @@ -294,17 +294,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, unsigned int *ip; ip = me->module_core + me->arch.plt_offset + info->plt_offset; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ ip[1] = 0x100607f1; ip[2] = val; -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ip[1] = 0x100a0004; ip[2] = 0x07f10000; ip[3] = (unsigned int) (val >> 32); ip[4] = (unsigned int) val; -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ info->plt_initialized = 1; } if (r_type == R_390_PLTOFF16 || diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 78b64fe..a942bf2 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -235,7 +235,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, /* Save access registers to new thread structure. */ save_access_regs(&p->thread.acrs[0]); -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the child. @@ -247,7 +247,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) p->thread.acrs[0] = regs->gprs[6]; -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ /* Save the fpu registers to new thread structure. */ save_fp_regs(&p->thread.fp_regs); p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE; @@ -260,7 +260,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, p->thread.acrs[1] = (unsigned int) regs->gprs[6]; } } -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* start new process with ar4 pointing to the correct address space */ p->thread.mm_segment = get_fs(); /* Don't copy debug registers */ @@ -339,16 +339,16 @@ out: */ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the dump. 
*/ save_fp_regs(&current->thread.fp_regs); memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs)); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ save_fp_regs(fpregs); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return 1; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 06afa31..8ecda6d 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -42,7 +42,7 @@ #include <asm/uaccess.h> #include <asm/unistd.h> -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT #include "compat_ptrace.h" #endif @@ -59,7 +59,7 @@ FixPerRegisters(struct task_struct *task) if (per_info->single_step) { per_info->control_regs.bits.starting_addr = 0; -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT if (test_thread_flag(TIF_31BIT)) per_info->control_regs.bits.ending_addr = 0x7fffffffUL; else @@ -112,7 +112,7 @@ ptrace_disable(struct task_struct *child) clear_single_step(child); } -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT # define __ADDR_MASK 3 #else # define __ADDR_MASK 7 @@ -138,7 +138,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) * an alignment of 4. Programmers from hell... */ mask = __ADDR_MASK; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT if (addr >= (addr_t) &dummy->regs.acrs && addr < (addr_t) &dummy->regs.orig_gpr2) mask = 3; @@ -160,7 +160,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the @@ -218,7 +218,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) * an alignment of 4. Programmers from hell indeed... */ mask = __ADDR_MASK; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT if (addr >= (addr_t) &dummy->regs.acrs && addr < (addr_t) &dummy->regs.orig_gpr2) mask = 3; @@ -231,13 +231,13 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask && -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && #endif data != PSW_MASK_MERGE(PSW_USER_BITS, data)) /* Invalid psw mask. */ return -EINVAL; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) /* I'd like to reject addresses without the high order bit but older gdb's rely on it */ @@ -250,7 +250,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower @@ -357,7 +357,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) return ptrace_request(child, request, addr, data); } -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT /* * Now the fun part starts... a 31 bit program running in the * 31 bit emulation tracing another program.
PTRACE_PEEKTEXT, @@ -629,7 +629,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) return peek_user(child, addr, data); if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP) return poke_user(child, addr, data); -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT if (request == PTRACE_PEEKUSR && addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT)) return peek_user_emu31(child, addr, data); @@ -695,7 +695,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) /* Do requests that differ for 31/64 bit */ default: -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT if (test_thread_flag(TIF_31BIT)) return do_ptrace_emu31(child, request, addr, data); #endif diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c index 83cb42b..1f33951 100644 --- a/arch/s390/kernel/reipl_diag.c +++ b/arch/s390/kernel/reipl_diag.c @@ -26,7 +26,7 @@ void reipl_diag(void) " st %%r4,%0\n" " st %%r5,%1\n" ".section __ex_table,\"a\"\n" -#ifdef __s390x__ +#ifdef CONFIG_64BIT " .align 8\n" " .quad 0b, 0b\n" #else diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 31e7b19..b03847d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -427,7 +427,7 @@ setup_lowcore(void) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lc->extended_save_area_addr = (__u32) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); @@ -562,21 +562,21 @@ setup_arch(char **cmdline_p) /* * print what head.S has found out about the machine */ -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT printk((MACHINE_IS_VM) ? "We are running under VM (31 bit mode)\n" : "We are running native (31 bit mode)\n"); printk((MACHINE_HAS_IEEE) ? "This machine has an IEEE fpu\n" : "This machine has no IEEE fpu\n"); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ printk((MACHINE_IS_VM) ? "We are running under VM (64 bit mode)\n" : "We are running native (64 bit mode)\n"); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ ROOT_DEV = Root_RAM0; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ /* * We need some free virtual space to be able to do vmalloc. @@ -585,9 +585,9 @@ setup_arch(char **cmdline_p) */ if (memory_end > 1920*1024*1024) memory_end = 1920*1024*1024; -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ memory_end = memory_size & ~0x200000UL; /* detected in head.s */ -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ init_mm.start_code = PAGE_OFFSET; init_mm.end_code = (unsigned long) &_etext; diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6e0110d..6ae4a77 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -254,9 +254,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; - /* It is more difficult to avoid calling this function than to - call it and ignore errors. */ - do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]); + if (do_sigaltstack(&frame->uc.uc_stack, NULL, + regs->gprs[15]) == -EFAULT) + goto badframe; return regs->gprs[2]; badframe: @@ -501,7 +501,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) if (signr > 0) { /* Whee! Actually deliver the signal. 
*/ -#ifdef CONFIG_S390_SUPPORT +#ifdef CONFIG_COMPAT if (test_thread_flag(TIF_31BIT)) { extern void handle_signal32(unsigned long sig, struct k_sigaction *ka, diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 5856b3f..e10f4ca 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused) int cpu; static atomic_t cpuid = ATOMIC_INIT(-1); - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) signal_processor(smp_processor_id(), sigp_stop); /* Wait for all other cpus to enter stopped state */ @@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused) { static atomic_t cpuid = ATOMIC_INIT(-1); - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) { smp_send_stop(); if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) cpcmd(vmhalt_cmd, NULL, 0, NULL); @@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused) { static atomic_t cpuid = ATOMIC_INIT(-1); - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) { + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) { smp_send_stop(); if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) cpcmd(vmpoff_cmd, NULL, 0, NULL); @@ -402,7 +402,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig) } } -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT /* * this function sends a 'purge tlb' signal to another CPU. */ @@ -416,7 +416,7 @@ void smp_ptlb_all(void) on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); -#endif /* ! CONFIG_ARCH_S390X */ +#endif /* ! CONFIG_64BIT */ /* * this function sends a 'reschedule' IPI to another CPU. @@ -783,7 +783,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (stack == 0ULL) panic("smp_boot_cpus failed to allocate memory\n"); lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); -#ifndef __s390x__ +#ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = (__u32) __get_free_pages(GFP_KERNEL,0); @@ -793,7 +793,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } #endif } -#ifndef __s390x__ +#ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) ctl_set_bit(14, 29); /* enable extended save area */ #endif diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index efe6b83..6a63553 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -26,9 +26,7 @@ #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> -#ifdef CONFIG_ARCH_S390X #include <linux/personality.h> -#endif /* CONFIG_ARCH_S390X */ #include <asm/uaccess.h> #include <asm/ipc.h> @@ -121,7 +119,7 @@ out: return error; } -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT struct sel_arg_struct { unsigned long n; fd_set *inp, *outp, *exp; @@ -138,7 +136,7 @@ asmlinkage long old_select(struct sel_arg_struct __user *arg) return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); } -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 
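/*
 * Aside, not part of the patch: the smp.c hunks above replace the old
 * s390 atomic_compare_and_swap(), which returned 0 on a successful swap,
 * with the generic atomic_cmpxchg(), which returns the previous value --
 * so success is now "the returned old value equals the expected one
 * (-1)". A hedged userspace analogue of that first-CPU-wins election,
 * using a GCC __sync builtin; cpuid and claim() are illustrative names.
 */
#include <stdio.h>

static int cpuid = -1;		/* -1 means nobody has claimed the shutdown */

/* Returns 1 if the caller won the election, 0 otherwise. */
static int claim(int my_id)
{
	/* like atomic_cmpxchg(): returns the previous value of cpuid */
	int old = __sync_val_compare_and_swap(&cpuid, -1, my_id);
	return old == -1;
}

int main(void)
{
	printf("cpu 0: %s\n", claim(0) ? "winner" : "loser");
	printf("cpu 1: %s\n", claim(1) ? "winner" : "loser");
	return 0;
}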
@@ -211,7 +209,7 @@ asmlinkage long sys_ipc(uint call, int first, unsigned long second, return -EINVAL; } -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT asmlinkage long s390x_newuname(struct new_utsname __user *name) { int ret = sys_newuname(name); @@ -235,12 +233,12 @@ asmlinkage long s390x_personality(unsigned long personality) return ret; } -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* * Wrapper function for sys_fadvise64/fadvise64_64 */ -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asmlinkage long s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index c5bd36f..95d1099 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -67,13 +67,13 @@ extern pgm_check_handler_t do_monitor_call; #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT #define FOURLONG "%08lx %08lx %08lx %08lx\n" static int kstack_depth_to_print = 12; -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define FOURLONG "%016lx %016lx %016lx %016lx\n" static int kstack_depth_to_print = 20; -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ /* * For show_trace we have tree different stack to consider: @@ -702,12 +702,12 @@ void __init trap_init(void) pgm_check_table[0x11] = &do_dat_exception; pgm_check_table[0x12] = &translation_exception; pgm_check_table[0x13] = &special_op_exception; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT pgm_check_table[0x38] = &do_dat_exception; pgm_check_table[0x39] = &do_dat_exception; pgm_check_table[0x3A] = &do_dat_exception; pgm_check_table[0x3B] = &do_dat_exception; -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ pgm_check_table[0x15] = &operand_exception; pgm_check_table[0x1C] = &space_switch_exception; pgm_check_table[0x1D] = &hfp_sqrt_exception; diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 89fdb38..9289fac 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -5,7 +5,7 @@ #include <asm-generic/vmlinux.lds.h> #include <linux/config.h> -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") OUTPUT_ARCH(s390) ENTRY(_start) diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index b701efa..d9b97b3 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -4,6 +4,5 @@ EXTRA_AFLAGS := -traditional -lib-y += delay.o string.o -lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o -lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o +lib-y += delay.o string.o spinlock.o +lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o) diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 2dc14e9..68d79c5 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -29,7 +29,7 @@ __setup("spin_retry=", spin_retry_setup); static inline void _diag44(void) { -#ifdef __s390x__ +#ifdef CONFIG_64BIT if (MACHINE_HAS_DIAG44) #endif asm volatile("diag 0,0,0x44"); diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 506a33b..a9566bc 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -143,7 +143,7 @@ dcss_diag (__u8 func, void *parameter, rx = (unsigned long) parameter; ry = (unsigned long) func; __asm__ __volatile__( -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT " sam31\n" // switch to 31 bit " diag %0,%1,0x64\n" " sam64\n" // switch back to 64 bit diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c 
index fb2607c..81ade40 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -31,17 +31,17 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 #define __FIXUP_MASK 0x7fffffff #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __FIXUP_MASK ~0L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ #ifdef CONFIG_SYSCTL extern int sysctl_userprocess_debug; @@ -393,11 +393,11 @@ int pfault_init(void) "2:\n" ".section __ex_table,\"a\"\n" " .align 4\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT " .long 0b,1b\n" -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ " .quad 0b,1b\n" -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ ".previous" : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" ); __ctl_set_bit(0, 9); @@ -417,11 +417,11 @@ void pfault_fini(void) "0:\n" ".section __ex_table,\"a\"\n" " .align 4\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT " .long 0b,0b\n" -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ " .quad 0b,0b\n" -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ ".previous" : : "a" (&refbk), "m" (refbk) : "cc" ); } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 6ec5cd9..df95338 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -44,7 +44,7 @@ void diag10(unsigned long addr) { if (addr >= 0x7ff00000) return; -#ifdef __s390x__ +#ifdef CONFIG_64BIT asm volatile ( " sam31\n" " diag %0,%0,0x10\n" @@ -106,7 +106,7 @@ extern unsigned long __initdata zholes_size[]; * paging_init() sets up the page tables */ -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT void __init paging_init(void) { pgd_t * pg_dir; @@ -175,7 +175,7 @@ void __init paging_init(void) return; } -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ void __init paging_init(void) { pgd_t * pg_dir; @@ -256,7 +256,7 @@ void __init paging_init(void) return; } -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ void __init mem_init(void) { diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index fb187e5..356257c 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -50,7 +50,7 @@ static inline unsigned long mmap_base(void) static inline int mmap_is_legacy(void) { -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* * Force standard allocation for 64 bit programs. */ diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile index ec34927..537b2d8 100644 --- a/arch/s390/oprofile/Makefile +++ b/arch/s390/oprofile/Makefile @@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprofilefs.o oprofile_stats.o \ timer_int.o ) -oprofile-y := $(DRIVER_OBJS) init.o +oprofile-y := $(DRIVER_OBJS) init.o backtrace.o diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c new file mode 100644 index 0000000..bc4b84a --- /dev/null +++ b/arch/s390/oprofile/backtrace.c @@ -0,0 +1,79 @@ +/** + * arch/s390/oprofile/backtrace.c + * + * S390 Version + * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH. 
+ * Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com> + */ + +#include <linux/oprofile.h> + +#include <asm/processor.h> /* for struct stack_frame */ + +static unsigned long +__show_trace(unsigned int *depth, unsigned long sp, + unsigned long low, unsigned long high) +{ + struct stack_frame *sf; + struct pt_regs *regs; + + while (*depth) { + sp = sp & PSW_ADDR_INSN; + if (sp < low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + (*depth)--; + oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + + /* Follow the backchain. */ + while (*depth) { + low = sp; + sp = sf->back_chain & PSW_ADDR_INSN; + if (!sp) + break; + if (sp <= low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + (*depth)--; + oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + + } + + if (*depth == 0) + break; + + /* Zero backchain detected, check for interrupt frame. */ + sp = (unsigned long) (sf + 1); + if (sp <= low || sp > high - sizeof(*regs)) + return sp; + regs = (struct pt_regs *) sp; + (*depth)--; + oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + low = sp; + sp = regs->gprs[15]; + } + return sp; +} + +void s390_backtrace(struct pt_regs * const regs, unsigned int depth) +{ + unsigned long head; + struct stack_frame* head_sf; + + if (user_mode (regs)) + return; + + head = regs->gprs[15]; + head_sf = (struct stack_frame*)head; + + if (!head_sf->back_chain) + return; + + head = head_sf->back_chain; + + head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, + S390_lowcore.async_stack); + + __show_trace(&depth, head, S390_lowcore.thread_info, + S390_lowcore.thread_info + THREAD_SIZE); +} diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index a65ead0..7a99511 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -12,8 +12,12 @@ #include <linux/init.h> #include <linux/errno.h> + +extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); + int __init oprofile_arch_init(struct oprofile_operations* ops) { + ops->backtrace = s390_backtrace; return -ENODEV; } diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 5b58fad..cd13b91 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -58,7 +58,7 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts) { my_puts("Using a channel type which is configured out of " "UML\n"); - return(NULL); + return NULL; } static int not_configged_open(int input, int output, int primary, void *data, @@ -66,7 +66,7 @@ static int not_configged_open(int input, int output, int primary, void *data, { my_puts("Using a channel type which is configured out of " "UML\n"); - return(-ENODEV); + return -ENODEV; } static void not_configged_close(int fd, void *data) @@ -79,21 +79,21 @@ static int not_configged_read(int fd, char *c_out, void *data) { my_puts("Using a channel type which is configured out of " "UML\n"); - return(-EIO); + return -EIO; } static int not_configged_write(int fd, const char *buf, int len, void *data) { my_puts("Using a channel type which is configured out of " "UML\n"); - return(-EIO); + return -EIO; } static int not_configged_console_write(int fd, const char *buf, int len) { my_puts("Using a channel type which is configured out of " "UML\n"); - return(-EIO); + return -EIO; } static int not_configged_window_size(int fd, void *data, unsigned short *rows, @@ -101,7 +101,7 @@ 
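/*
 * Aside, not part of the patch: the backchain walk performed by
 * __show_trace() in backtrace.c above, reduced to a self-contained
 * model. struct frame, walk() and the demo frames are invented
 * stand-ins; the real code additionally masks addresses with
 * PSW_ADDR_INSN and switches from the async stack to the process
 * stack when it meets a zero backchain.
 */
#include <stdio.h>

struct frame {
	struct frame *back_chain;  /* caller's frame, NULL at the outermost one */
	unsigned long ret_addr;    /* saved return address for this frame */
};

/* Walk at most 'depth' frames, staying inside the [low, high) window. */
static void walk(struct frame *sp, unsigned long low, unsigned long high,
		 unsigned int depth)
{
	while (depth-- && sp) {
		unsigned long a = (unsigned long) sp;

		if (a < low || a + sizeof(*sp) > high)
			return;    /* pointer left the stack area: stop */
		printf("ret: %#lx\n", sp->ret_addr);
		sp = sp->back_chain;
	}
}

int main(void)
{
	struct frame outer = { NULL, 0x1000 };
	struct frame inner = { &outer, 0x2000 };

	walk(&inner, 0, ~0UL, 10); /* window spans everything for the demo */
	return 0;
}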
static int not_configged_window_size(int fd, void *data, unsigned short *rows, { my_puts("Using a channel type which is configured out of " "UML\n"); - return(-ENODEV); + return -ENODEV; } static void not_configged_free(void *data) @@ -135,17 +135,17 @@ int generic_read(int fd, char *c_out, void *unused) n = os_read_file(fd, c_out, sizeof(*c_out)); if(n == -EAGAIN) - return(0); + return 0; else if(n == 0) - return(-EIO); - return(n); + return -EIO; + return n; } /* XXX Trivial wrapper around os_write_file */ int generic_write(int fd, const char *buf, int n, void *unused) { - return(os_write_file(fd, buf, n)); + return os_write_file(fd, buf, n); } int generic_window_size(int fd, void *unused, unsigned short *rows_out, @@ -156,14 +156,14 @@ int generic_window_size(int fd, void *unused, unsigned short *rows_out, ret = os_window_size(fd, &rows, &cols); if(ret < 0) - return(ret); + return ret; ret = ((*rows_out != rows) || (*cols_out != cols)); *rows_out = rows; *cols_out = cols; - return(ret); + return ret; } void generic_free(void *data) @@ -186,25 +186,29 @@ static void tty_receive_char(struct tty_struct *tty, char ch) } } - if((tty->flip.flag_buf_ptr == NULL) || + if((tty->flip.flag_buf_ptr == NULL) || (tty->flip.char_buf_ptr == NULL)) return; tty_insert_flip_char(tty, ch, TTY_NORMAL); } -static int open_one_chan(struct chan *chan, int input, int output, int primary) +static int open_one_chan(struct chan *chan) { int fd; - if(chan->opened) return(0); - if(chan->ops->open == NULL) fd = 0; - else fd = (*chan->ops->open)(input, output, primary, chan->data, - &chan->dev); - if(fd < 0) return(fd); + if(chan->opened) + return 0; + + if(chan->ops->open == NULL) + fd = 0; + else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary, + chan->data, &chan->dev); + if(fd < 0) + return fd; chan->fd = fd; chan->opened = 1; - return(0); + return 0; } int open_chan(struct list_head *chans) @@ -215,11 +219,11 @@ int open_chan(struct list_head *chans) list_for_each(ele, chans){ chan = list_entry(ele, struct chan, list); - ret = open_one_chan(chan, chan->input, chan->output, - chan->primary); - if(chan->primary) err = ret; + ret = open_one_chan(chan); + if(chan->primary) + err = ret; } - return(err); + return err; } void chan_enable_winch(struct list_head *chans, struct tty_struct *tty) @@ -236,20 +240,65 @@ void chan_enable_winch(struct list_head *chans, struct tty_struct *tty) } } -void enable_chan(struct list_head *chans, struct tty_struct *tty) +void enable_chan(struct line *line) { struct list_head *ele; struct chan *chan; - list_for_each(ele, chans){ + list_for_each(ele, &line->chan_list){ chan = list_entry(ele, struct chan, list); - if(!chan->opened) continue; + if(open_one_chan(chan)) + continue; + + if(chan->enabled) + continue; + line_setup_irq(chan->fd, chan->input, chan->output, line, + chan); + chan->enabled = 1; + } +} + +static LIST_HEAD(irqs_to_free); + +void free_irqs(void) +{ + struct chan *chan; + + while(!list_empty(&irqs_to_free)){ + chan = list_entry(irqs_to_free.next, struct chan, free_list); + list_del(&chan->free_list); - line_setup_irq(chan->fd, chan->input, chan->output, tty); + if(chan->input) + free_irq(chan->line->driver->read_irq, chan); + if(chan->output) + free_irq(chan->line->driver->write_irq, chan); + chan->enabled = 0; + } +} + +static void close_one_chan(struct chan *chan, int delay_free_irq) +{ + if(!chan->opened) + return; + + if(delay_free_irq){ + list_add(&chan->free_list, &irqs_to_free); } + else { + if(chan->input) + free_irq(chan->line->driver->read_irq, 
chan); + if(chan->output) + free_irq(chan->line->driver->write_irq, chan); + chan->enabled = 0; + } + if(chan->ops->close != NULL) + (*chan->ops->close)(chan->fd, chan->data); + + chan->opened = 0; + chan->fd = -1; } -void close_chan(struct list_head *chans) +void close_chan(struct list_head *chans, int delay_free_irq) { struct chan *chan; @@ -259,15 +308,37 @@ void close_chan(struct list_head *chans) * so it must be the last closed. */ list_for_each_entry_reverse(chan, chans, list) { - if(!chan->opened) continue; - if(chan->ops->close != NULL) - (*chan->ops->close)(chan->fd, chan->data); - chan->opened = 0; - chan->fd = -1; + close_one_chan(chan, delay_free_irq); + } +} + +void deactivate_chan(struct list_head *chans, int irq) +{ + struct list_head *ele; + + struct chan *chan; + list_for_each(ele, chans) { + chan = list_entry(ele, struct chan, list); + + if(chan->enabled && chan->input) + deactivate_fd(chan->fd, irq); + } +} + +void reactivate_chan(struct list_head *chans, int irq) +{ + struct list_head *ele; + struct chan *chan; + + list_for_each(ele, chans) { + chan = list_entry(ele, struct chan, list); + + if(chan->enabled && chan->input) + reactivate_fd(chan->fd, irq); } } -int write_chan(struct list_head *chans, const char *buf, int len, +int write_chan(struct list_head *chans, const char *buf, int len, int write_irq) { struct list_head *ele; @@ -285,7 +356,7 @@ int write_chan(struct list_head *chans, const char *buf, int len, reactivate_fd(chan->fd, write_irq); } } - return(ret); + return ret; } int console_write_chan(struct list_head *chans, const char *buf, int len) @@ -301,19 +372,18 @@ int console_write_chan(struct list_head *chans, const char *buf, int len) n = chan->ops->console_write(chan->fd, buf, len); if(chan->primary) ret = n; } - return(ret); + return ret; } -int console_open_chan(struct line *line, struct console *co, struct chan_opts *opts) +int console_open_chan(struct line *line, struct console *co, + struct chan_opts *opts) { - if (!list_empty(&line->chan_list)) - return 0; + int err; + + err = open_chan(&line->chan_list); + if(err) + return err; - if (0 != parse_chan_pair(line->init_str, &line->chan_list, - line->init_pri, co->index, opts)) - return -1; - if (0 != open_chan(&line->chan_list)) - return -1; printk("Console initialized on /dev/%s%d\n",co->name,co->index); return 0; } @@ -327,32 +397,36 @@ int chan_window_size(struct list_head *chans, unsigned short *rows_out, list_for_each(ele, chans){ chan = list_entry(ele, struct chan, list); if(chan->primary){ - if(chan->ops->window_size == NULL) return(0); - return(chan->ops->window_size(chan->fd, chan->data, - rows_out, cols_out)); + if(chan->ops->window_size == NULL) + return 0; + return chan->ops->window_size(chan->fd, chan->data, + rows_out, cols_out); } } - return(0); + return 0; } -void free_one_chan(struct chan *chan) +void free_one_chan(struct chan *chan, int delay_free_irq) { list_del(&chan->list); + + close_one_chan(chan, delay_free_irq); + if(chan->ops->free != NULL) (*chan->ops->free)(chan->data); - free_irq_by_fd(chan->fd); + if(chan->primary && chan->output) ignore_sigio_fd(chan->fd); kfree(chan); } -void free_chan(struct list_head *chans) +void free_chan(struct list_head *chans, int delay_free_irq) { struct list_head *ele, *next; struct chan *chan; list_for_each_safe(ele, next, chans){ chan = list_entry(ele, struct chan, list); - free_one_chan(chan); + free_one_chan(chan, delay_free_irq); } } @@ -363,23 +437,23 @@ static int one_chan_config_string(struct chan *chan, char *str, int size, if(chan == 
NULL){ CONFIG_CHUNK(str, size, n, "none", 1); - return(n); + return n; } CONFIG_CHUNK(str, size, n, chan->ops->type, 0); if(chan->dev == NULL){ CONFIG_CHUNK(str, size, n, "", 1); - return(n); + return n; } CONFIG_CHUNK(str, size, n, ":", 0); CONFIG_CHUNK(str, size, n, chan->dev, 0); - return(n); + return n; } -static int chan_pair_config_string(struct chan *in, struct chan *out, +static int chan_pair_config_string(struct chan *in, struct chan *out, char *str, int size, char **error_out) { int n; @@ -390,7 +464,7 @@ static int chan_pair_config_string(struct chan *in, struct chan *out, if(in == out){ CONFIG_CHUNK(str, size, n, "", 1); - return(n); + return n; } CONFIG_CHUNK(str, size, n, ",", 1); @@ -399,10 +473,10 @@ static int chan_pair_config_string(struct chan *in, struct chan *out, size -= n; CONFIG_CHUNK(str, size, n, "", 1); - return(n); + return n; } -int chan_config_string(struct list_head *chans, char *str, int size, +int chan_config_string(struct list_head *chans, char *str, int size, char **error_out) { struct list_head *ele; @@ -418,7 +492,7 @@ int chan_config_string(struct list_head *chans, char *str, int size, out = chan; } - return(chan_pair_config_string(in, out, str, size, error_out)); + return chan_pair_config_string(in, out, str, size, error_out); } struct chan_type { @@ -462,7 +536,7 @@ struct chan_type chan_table[] = { #endif }; -static struct chan *parse_chan(char *str, int pri, int device, +static struct chan *parse_chan(struct line *line, char *str, int device, struct chan_opts *opts) { struct chan_type *entry; @@ -484,36 +558,42 @@ static struct chan *parse_chan(char *str, int pri, int device, if(ops == NULL){ my_printf("parse_chan couldn't parse \"%s\"\n", str); - return(NULL); + return NULL; } - if(ops->init == NULL) return(NULL); + if(ops->init == NULL) + return NULL; data = (*ops->init)(str, device, opts); - if(data == NULL) return(NULL); + if(data == NULL) + return NULL; chan = kmalloc(sizeof(*chan), GFP_ATOMIC); - if(chan == NULL) return(NULL); + if(chan == NULL) + return NULL; *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list), + .free_list = + LIST_HEAD_INIT(chan->free_list), + .line = line, .primary = 1, .input = 0, .output = 0, .opened = 0, + .enabled = 0, .fd = -1, - .pri = pri, .ops = ops, .data = data }); - return(chan); + return chan; } -int parse_chan_pair(char *str, struct list_head *chans, int pri, int device, +int parse_chan_pair(char *str, struct line *line, int device, struct chan_opts *opts) { + struct list_head *chans = &line->chan_list; struct chan *new, *chan; char *in, *out; if(!list_empty(chans)){ chan = list_entry(chans->next, struct chan, list); - if(chan->pri >= pri) return(0); - free_chan(chans); + free_chan(chans, 0); INIT_LIST_HEAD(chans); } @@ -522,24 +602,30 @@ int parse_chan_pair(char *str, struct list_head *chans, int pri, int device, in = str; *out = '\0'; out++; - new = parse_chan(in, pri, device, opts); - if(new == NULL) return(-1); + new = parse_chan(line, in, device, opts); + if(new == NULL) + return -1; + new->input = 1; list_add(&new->list, chans); - new = parse_chan(out, pri, device, opts); - if(new == NULL) return(-1); + new = parse_chan(line, out, device, opts); + if(new == NULL) + return -1; + list_add(&new->list, chans); new->output = 1; } else { - new = parse_chan(str, pri, device, opts); - if(new == NULL) return(-1); + new = parse_chan(line, str, device, opts); + if(new == NULL) + return -1; + list_add(&new->list, chans); new->input = 1; new->output = 1; } - return(0); + return 0; } int chan_out_fd(struct 
list_head *chans) @@ -550,9 +636,9 @@ int chan_out_fd(struct list_head *chans) list_for_each(ele, chans){ chan = list_entry(ele, struct chan, list); if(chan->primary && chan->output) - return(chan->fd); + return chan->fd; } - return(-1); + return -1; } void chan_interrupt(struct list_head *chans, struct work_struct *task, @@ -567,9 +653,9 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task, chan = list_entry(ele, struct chan, list); if(!chan->input || (chan->ops->read == NULL)) continue; do { - if((tty != NULL) && + if((tty != NULL) && (tty->flip.count >= TTY_FLIPBUF_SIZE)){ - schedule_work(task); + schedule_delayed_work(task, 1); goto out; } err = chan->ops->read(chan->fd, &c, chan->data); @@ -582,29 +668,12 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task, if(chan->primary){ if(tty != NULL) tty_hangup(tty); - line_disable(tty, irq); - close_chan(chans); - free_chan(chans); + close_chan(chans, 1); return; } - else { - if(chan->ops->close != NULL) - chan->ops->close(chan->fd, chan->data); - free_one_chan(chan); - } + else close_one_chan(chan, 1); } } out: if(tty) tty_flip_buffer_push(tty); } - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index e0fdffa..46ceb25 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -23,8 +23,9 @@ static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused) { - struct tty_struct *tty = data; - struct line *line = tty->driver_data; + struct chan *chan = data; + struct line *line = chan->line; + struct tty_struct *tty = line->tty; if (line) chan_interrupt(&line->chan_list, &line->task, tty, irq); @@ -33,10 +34,11 @@ static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused) static void line_timer_cb(void *arg) { - struct tty_struct *tty = arg; - struct line *line = tty->driver_data; + struct line *line = arg; - line_interrupt(line->driver->read_irq, arg, NULL); + if(!line->throttled) + chan_interrupt(&line->chan_list, &line->task, line->tty, + line->driver->read_irq); } /* Returns the free space inside the ring buffer of this line. 
@@ -124,7 +126,8 @@ static int buffer_data(struct line *line, const char *buf, int len) if (len < end){ memcpy(line->tail, buf, len); line->tail += len; - } else { + } + else { /* The circular buffer is wrapping */ memcpy(line->tail, buf, end); buf += end; @@ -170,7 +173,7 @@ static int flush_buffer(struct line *line) } count = line->tail - line->head; - n = write_chan(&line->chan_list, line->head, count, + n = write_chan(&line->chan_list, line->head, count, line->driver->write_irq); if(n < 0) @@ -227,7 +230,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len) if (err <= 0 && (err != -EAGAIN || !ret)) ret = err; } else { - n = write_chan(&line->chan_list, buf, len, + n = write_chan(&line->chan_list, buf, len, line->driver->write_irq); if (n < 0) { ret = n; @@ -338,11 +341,36 @@ int line_ioctl(struct tty_struct *tty, struct file * file, return ret; } +void line_throttle(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + deactivate_chan(&line->chan_list, line->driver->read_irq); + line->throttled = 1; +} + +void line_unthrottle(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + line->throttled = 0; + chan_interrupt(&line->chan_list, &line->task, tty, + line->driver->read_irq); + + /* Maybe there is enough stuff pending that calling the interrupt + * throttles us again. In this case, line->throttled will be 1 + * again and we shouldn't turn the interrupt back on. + */ + if(!line->throttled) + reactivate_chan(&line->chan_list, line->driver->read_irq); +} + static irqreturn_t line_write_interrupt(int irq, void *data, struct pt_regs *unused) { - struct tty_struct *tty = data; - struct line *line = tty->driver_data; + struct chan *chan = data; + struct line *line = chan->line; + struct tty_struct *tty = line->tty; int err; /* Interrupts are enabled here because we registered the interrupt with @@ -364,7 +392,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data, if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && (tty->ldisc.write_wakeup != NULL)) (tty->ldisc.write_wakeup)(tty); - + /* BLOCKING mode * In blocking mode, everything sleeps on tty->write_wait. 
* Sleeping in the console driver would break non-blocking @@ -376,53 +404,29 @@ static irqreturn_t line_write_interrupt(int irq, void *data, return IRQ_HANDLED; } -int line_setup_irq(int fd, int input, int output, struct tty_struct *tty) +int line_setup_irq(int fd, int input, int output, struct line *line, void *data) { - struct line *line = tty->driver_data; struct line_driver *driver = line->driver; int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM; if (input) err = um_request_irq(driver->read_irq, fd, IRQ_READ, - line_interrupt, flags, - driver->read_irq_name, tty); + line_interrupt, flags, + driver->read_irq_name, data); if (err) return err; if (output) err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, - line_write_interrupt, flags, - driver->write_irq_name, tty); + line_write_interrupt, flags, + driver->write_irq_name, data); line->have_irq = 1; return err; } -void line_disable(struct tty_struct *tty, int current_irq) -{ - struct line *line = tty->driver_data; - - if(!line->have_irq) - return; - - if(line->driver->read_irq == current_irq) - free_irq_later(line->driver->read_irq, tty); - else { - free_irq(line->driver->read_irq, tty); - } - - if(line->driver->write_irq == current_irq) - free_irq_later(line->driver->write_irq, tty); - else { - free_irq(line->driver->write_irq, tty); - } - - line->have_irq = 0; -} - -int line_open(struct line *lines, struct tty_struct *tty, - struct chan_opts *opts) +int line_open(struct line *lines, struct tty_struct *tty) { struct line *line; - int err = 0; + int err = -ENODEV; line = &lines[tty->index]; tty->driver_data = line; @@ -430,31 +434,29 @@ int line_open(struct line *lines, struct tty_struct *tty, /* The IRQ which takes this lock is not yet enabled and won't be run * before the end, so we don't need to use spin_lock_irq.*/ spin_lock(&line->lock); - if (tty->count == 1) { - if (!line->valid) { - err = -ENODEV; - goto out; - } - if (list_empty(&line->chan_list)) { - err = parse_chan_pair(line->init_str, &line->chan_list, - line->init_pri, tty->index, opts); - if(err) goto out; - err = open_chan(&line->chan_list); - if(err) goto out; + + tty->driver_data = line; + line->tty = tty; + if(!line->valid) + goto out; + + if(tty->count == 1){ + /* Here the device is opened, if necessary, and interrupt + * is registered. + */ + enable_chan(line); + INIT_WORK(&line->task, line_timer_cb, line); + + if(!line->sigio){ + chan_enable_winch(&line->chan_list, tty); + line->sigio = 1; } - /* Here the interrupt is registered.*/ - enable_chan(&line->chan_list, tty); - INIT_WORK(&line->task, line_timer_cb, tty); - } - if(!line->sigio){ - chan_enable_winch(&line->chan_list, tty); - line->sigio = 1; + chan_window_size(&line->chan_list, &tty->winsize.ws_row, + &tty->winsize.ws_col); } - chan_window_size(&line->chan_list, &tty->winsize.ws_row, - &tty->winsize.ws_col); - line->count++; + err = 0; out: spin_unlock(&line->lock); return err; @@ -474,15 +476,14 @@ void line_close(struct tty_struct *tty, struct file * filp) /* We ignore the error anyway! 
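/*
 * Aside, not part of the patch: line_unthrottle() above clears the flag,
 * drains pending input once, and re-arms the interrupt source only if
 * draining did not throttle the line again. A minimal model of that
 * re-check; struct port, poll_input() and reader_full are invented
 * stand-ins for the tty machinery.
 */
#include <stdbool.h>
#include <stdio.h>

struct port {
	bool throttled;    /* set when the reader cannot keep up */
	bool irq_enabled;  /* whether the input source is armed */
};

static bool reader_full;   /* stand-in for a full tty flip buffer */

static void poll_input(struct port *port)
{
	if (reader_full)
		port->throttled = true; /* draining filled the reader again */
}

static void unthrottle(struct port *port)
{
	port->throttled = false;
	poll_input(port);

	/* polling may have throttled us again; re-arm only if it did not */
	if (!port->throttled)
		port->irq_enabled = true;
}

int main(void)
{
	struct port p = { true, false };

	unthrottle(&p);
	printf("irq_enabled: %d\n", p.irq_enabled);
	return 0;
}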
*/ flush_buffer(line); - line->count--; - if (tty->count == 1) { - line_disable(tty, -1); + if(tty->count == 1){ + line->tty = NULL; tty->driver_data = NULL; - } - if((line->count == 0) && line->sigio){ - unregister_winch(tty); - line->sigio = 0; + if(line->sigio){ + unregister_winch(tty); + line->sigio = 0; + } } spin_unlock_irq(&line->lock); @@ -493,17 +494,15 @@ void close_lines(struct line *lines, int nlines) int i; for(i = 0; i < nlines; i++) - close_chan(&lines[i].chan_list); + close_chan(&lines[i].chan_list, 0); } /* Common setup code for both startup command line and mconsole initialization. * @lines contains the array (of size @num) to modify; * @init is the setup string; - * @all_allowed is a boolean saying if we can setup the whole @lines - * at once. For instance, it will usually be true for startup init. (where we - * can use con=xterm) and false for mconsole.*/ + */ -int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed) +int line_setup(struct line *lines, unsigned int num, char *init) { int i, n; char *end; @@ -512,10 +511,11 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed /* We said con=/ssl= instead of con#=, so we are configuring all * consoles at once.*/ n = -1; - } else { + } + else { n = simple_strtoul(init, &end, 0); if(*end != '='){ - printk(KERN_ERR "line_setup failed to parse \"%s\"\n", + printk(KERN_ERR "line_setup failed to parse \"%s\"\n", init); return 0; } @@ -527,8 +527,9 @@ printk("line_setup - %d out of range ((0 ... %d) allowed)\n", n, num - 1); return 0; - } else if (n >= 0){ - if (lines[n].count > 0) { + } + else if (n >= 0){ + if (lines[n].tty != NULL) { printk("line_setup - device %d is open\n", n); return 0; } @@ -539,13 +540,10 @@ else { lines[n].init_str = init; lines[n].valid = 1; - } + } } - } else if(!all_allowed){ - printk("line_setup - can't configure all devices from " - "mconsole\n"); - return 0; - } else { + } + else { for(i = 0; i < num; i++){ if(lines[i].init_pri <= INIT_ALL){ lines[i].init_pri = INIT_ALL; @@ -557,18 +555,33 @@ } } } - return 1; + return n == -1 ?
num : n; } -int line_config(struct line *lines, unsigned int num, char *str) +int line_config(struct line *lines, unsigned int num, char *str, + struct chan_opts *opts) { - char *new = uml_strdup(str); + struct line *line; + char *new; + int n; + if(*str == '='){ + printk("line_config - can't configure all devices from " + "mconsole\n"); + return 1; + } + + new = kstrdup(str, GFP_KERNEL); if(new == NULL){ - printk("line_config - uml_strdup failed\n"); - return -ENOMEM; + printk("line_config - kstrdup failed\n"); + return 1; } - return !line_setup(lines, num, new, 0); + n = line_setup(lines, num, new); + if(n < 0) + return 1; + + line = &lines[n]; + return parse_chan_pair(line->init_str, line, n, opts); } int line_get_config(char *name, struct line *lines, unsigned int num, char *str, @@ -594,7 +607,7 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str, spin_lock(&line->lock); if(!line->valid) CONFIG_CHUNK(str, size, n, "none", 1); - else if(line->count == 0) + else if(line->tty == NULL) CONFIG_CHUNK(str, size, n, line->init_str, 1); else n = chan_config_string(&line->chan_list, str, size, error_out); spin_unlock(&line->lock); @@ -619,14 +632,18 @@ int line_id(char **str, int *start_out, int *end_out) int line_remove(struct line *lines, unsigned int num, int n) { + int err; char config[sizeof("conxxxx=none\0")]; sprintf(config, "%d=none", n); - return !line_setup(lines, num, config, 0); + err = line_setup(lines, num, config); + if(err >= 0) + err = 0; + return err; } struct tty_driver *line_register_devfs(struct lines *set, - struct line_driver *line_driver, + struct line_driver *line_driver, struct tty_operations *ops, struct line *lines, int nlines) { @@ -655,7 +672,7 @@ struct tty_driver *line_register_devfs(struct lines *set, } for(i = 0; i < nlines; i++){ - if(!lines[i].valid) + if(!lines[i].valid) tty_unregister_device(driver, i); } @@ -663,24 +680,28 @@ struct tty_driver *line_register_devfs(struct lines *set, return driver; } -static spinlock_t winch_handler_lock; -LIST_HEAD(winch_handlers); +static DEFINE_SPINLOCK(winch_handler_lock); +static LIST_HEAD(winch_handlers); -void lines_init(struct line *lines, int nlines) +void lines_init(struct line *lines, int nlines, struct chan_opts *opts) { struct line *line; int i; - spin_lock_init(&winch_handler_lock); for(i = 0; i < nlines; i++){ line = &lines[i]; INIT_LIST_HEAD(&line->chan_list); - spin_lock_init(&line->lock); - if(line->init_str != NULL){ - line->init_str = uml_strdup(line->init_str); - if(line->init_str == NULL) - printk("lines_init - uml_strdup returned " - "NULL\n"); + + if(line->init_str == NULL) + continue; + + line->init_str = kstrdup(line->init_str, GFP_KERNEL); + if(line->init_str == NULL) + printk("lines_init - kstrdup returned NULL\n"); + + if(parse_chan_pair(line->init_str, line, i, opts)){ + printk("parse_chan_pair failed for device %d\n", i); + line->valid = 0; } } } @@ -717,8 +738,7 @@ irqreturn_t winch_interrupt(int irq, void *data, struct pt_regs *unused) tty = winch->tty; if (tty != NULL) { line = tty->driver_data; - chan_window_size(&line->chan_list, - &tty->winsize.ws_row, + chan_window_size(&line->chan_list, &tty->winsize.ws_row, &tty->winsize.ws_col); kill_pg(tty->pgrp, SIGWINCH, 1); } @@ -749,60 +769,54 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty) spin_unlock(&winch_handler_lock); if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, - SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, + SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, "winch", 
winch) < 0) printk("register_winch_irq - failed to register IRQ\n"); } +static void free_winch(struct winch *winch) +{ + list_del(&winch->list); + + if(winch->pid != -1) + os_kill_process(winch->pid, 1); + if(winch->fd != -1) + os_close_file(winch->fd); + + free_irq(WINCH_IRQ, winch); + kfree(winch); +} + static void unregister_winch(struct tty_struct *tty) { struct list_head *ele; - struct winch *winch, *found = NULL; + struct winch *winch; spin_lock(&winch_handler_lock); + list_for_each(ele, &winch_handlers){ winch = list_entry(ele, struct winch, list); if(winch->tty == tty){ - found = winch; - break; + free_winch(winch); + break; } } - if(found == NULL) - goto err; - - list_del(&winch->list); - spin_unlock(&winch_handler_lock); - - if(winch->pid != -1) - os_kill_process(winch->pid, 1); - - free_irq(WINCH_IRQ, winch); - kfree(winch); - - return; -err: spin_unlock(&winch_handler_lock); } -/* XXX: No lock as it's an exitcall... is this valid? Depending on cleanup - * order... are we sure that nothing else is done on the list? */ static void winch_cleanup(void) { - struct list_head *ele; + struct list_head *ele, *next; struct winch *winch; - list_for_each(ele, &winch_handlers){ + spin_lock(&winch_handler_lock); + + list_for_each_safe(ele, next, &winch_handlers){ winch = list_entry(ele, struct winch, list); - if(winch->fd != -1){ - /* Why is this different from the above free_irq(), - * which deactivates SIGIO? This searches the FD - * somewhere else and removes it from the list... */ - deactivate_fd(winch->fd, WINCH_IRQ); - os_close_file(winch->fd); - } - if(winch->pid != -1) - os_kill_process(winch->pid, 1); + free_winch(winch); } + + spin_unlock(&winch_handler_lock); } __uml_exitcall(winch_cleanup); @@ -811,10 +825,10 @@ char *add_xterm_umid(char *base) char *umid, *title; int len; - umid = get_umid(1); - if(umid == NULL) + umid = get_umid(); + if(*umid == '\0') return base; - + len = strlen(base) + strlen(" ()") + strlen(umid) + 1; title = kmalloc(len, GFP_KERNEL); if(title == NULL){ diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 12c9536..be61012 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -20,6 +20,7 @@ #include "linux/namei.h" #include "linux/proc_fs.h" #include "linux/syscalls.h" +#include "linux/console.h" #include "asm/irq.h" #include "asm/uaccess.h" #include "user_util.h" @@ -34,7 +35,7 @@ #include "irq_kern.h" #include "choose-mode.h" -static int do_unlink_socket(struct notifier_block *notifier, +static int do_unlink_socket(struct notifier_block *notifier, unsigned long what, void *data) { return(mconsole_unlink_socket()); @@ -46,12 +47,12 @@ static struct notifier_block reboot_notifier = { .priority = 0, }; -/* Safe without explicit locking for now. Tasklets provide their own +/* Safe without explicit locking for now. Tasklets provide their own * locking, and the interrupt handler is safe because it can't interrupt * itself and it can only happen on CPU 0. 
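The winch rework above is a textbook cleanup: free_winch() now does the list_del(), process kill, file close, free_irq() and kfree() in one place, and winch_cleanup() switches to the _safe list walk because the loop body frees the node it is standing on. Distilled to the underlying idiom (kernel <linux/list.h> API assumed, struct winch as in this file):

	struct list_head *ele, *next;

	spin_lock(&winch_handler_lock);
	list_for_each_safe(ele, next, &winch_handlers) {
		struct winch *winch = list_entry(ele, struct winch, list);

		/* free_winch() list_del()s and kfree()s the node, so 'ele'
		 * is dead after the call; 'next' was cached before the body
		 * ran, which plain list_for_each() would not do. */
		free_winch(winch);
	}
	spin_unlock(&winch_handler_lock);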
*/ -LIST_HEAD(mc_requests); +static LIST_HEAD(mc_requests); static void mc_work_proc(void *unused) { @@ -60,7 +61,7 @@ static void mc_work_proc(void *unused) while(!list_empty(&mc_requests)){ local_save_flags(flags); - req = list_entry(mc_requests.next, struct mconsole_entry, + req = list_entry(mc_requests.next, struct mconsole_entry, list); list_del(&req->list); local_irq_restore(flags); @@ -69,7 +70,7 @@ static void mc_work_proc(void *unused) } } -DECLARE_WORK(mconsole_work, mc_work_proc, NULL); +static DECLARE_WORK(mconsole_work, mc_work_proc, NULL); static irqreturn_t mconsole_interrupt(int irq, void *dev_id, struct pt_regs *regs) @@ -103,8 +104,8 @@ void mconsole_version(struct mc_request *req) { char version[256]; - sprintf(version, "%s %s %s %s %s", system_utsname.sysname, - system_utsname.nodename, system_utsname.release, + sprintf(version, "%s %s %s %s %s", system_utsname.sysname, + system_utsname.nodename, system_utsname.release, system_utsname.version, system_utsname.machine); mconsole_reply(req, version, 0, 0); } @@ -348,7 +349,7 @@ static struct mc_device *mconsole_find_dev(char *name) #define CONFIG_BUF_SIZE 64 -static void mconsole_get_config(int (*get_config)(char *, char *, int, +static void mconsole_get_config(int (*get_config)(char *, char *, int, char **), struct mc_request *req, char *name) { @@ -389,7 +390,6 @@ static void mconsole_get_config(int (*get_config)(char *, char *, int, out: if(buf != default_buf) kfree(buf); - } void mconsole_config(struct mc_request *req) @@ -420,9 +420,9 @@ void mconsole_config(struct mc_request *req) void mconsole_remove(struct mc_request *req) { - struct mc_device *dev; + struct mc_device *dev; char *ptr = req->request.data, *err_msg = ""; - char error[256]; + char error[256]; int err, start, end, n; ptr += strlen("remove"); @@ -433,37 +433,112 @@ void mconsole_remove(struct mc_request *req) return; } - ptr = &ptr[strlen(dev->name)]; - - err = 1; - n = (*dev->id)(&ptr, &start, &end); - if(n < 0){ - err_msg = "Couldn't parse device number"; - goto out; - } - else if((n < start) || (n > end)){ - sprintf(error, "Invalid device number - must be between " - "%d and %d", start, end); - err_msg = error; - goto out; - } + ptr = &ptr[strlen(dev->name)]; + + err = 1; + n = (*dev->id)(&ptr, &start, &end); + if(n < 0){ + err_msg = "Couldn't parse device number"; + goto out; + } + else if((n < start) || (n > end)){ + sprintf(error, "Invalid device number - must be between " + "%d and %d", start, end); + err_msg = error; + goto out; + } err = (*dev->remove)(n); - switch(err){ - case -ENODEV: - err_msg = "Device doesn't exist"; - break; - case -EBUSY: - err_msg = "Device is currently open"; - break; - default: - break; - } - out: + switch(err){ + case -ENODEV: + err_msg = "Device doesn't exist"; + break; + case -EBUSY: + err_msg = "Device is currently open"; + break; + default: + break; + } +out: mconsole_reply(req, err_msg, err, 0); } +static DEFINE_SPINLOCK(console_lock); +static LIST_HEAD(clients); +static char console_buf[MCONSOLE_MAX_DATA]; +static int console_index = 0; + +static void console_write(struct console *console, const char *string, + unsigned len) +{ + struct list_head *ele; + int n; + + if(list_empty(&clients)) + return; + + while(1){ + n = min(len, ARRAY_SIZE(console_buf) - console_index); + strncpy(&console_buf[console_index], string, n); + console_index += n; + string += n; + len -= n; + if(len == 0) + return; + + list_for_each(ele, &clients){ + struct mconsole_entry *entry; + + entry = list_entry(ele, struct mconsole_entry, 
list); + mconsole_reply_len(&entry->request, console_buf, + console_index, 0, 1); + } + + console_index = 0; + } +} + +static struct console mc_console = { .name = "mc", + .write = console_write, + .flags = CON_PRINTBUFFER | CON_ENABLED, + .index = -1 }; + +static int mc_add_console(void) +{ + register_console(&mc_console); + return 0; +} + +late_initcall(mc_add_console); + +static void with_console(struct mc_request *req, void (*proc)(void *), + void *arg) +{ + struct mconsole_entry entry; + unsigned long flags; + + INIT_LIST_HEAD(&entry.list); + entry.request = *req; + list_add(&entry.list, &clients); + spin_lock_irqsave(&console_lock, flags); + + (*proc)(arg); + + mconsole_reply_len(req, console_buf, console_index, 0, 0); + console_index = 0; + + spin_unlock_irqrestore(&console_lock, flags); + list_del(&entry.list); +} + #ifdef CONFIG_MAGIC_SYSRQ +static void sysrq_proc(void *arg) +{ + char *op = arg; + + handle_sysrq(*op, &current->thread.regs, NULL); +} + void mconsole_sysrq(struct mc_request *req) { char *ptr = req->request.data; @@ -471,8 +546,13 @@ void mconsole_sysrq(struct mc_request *req) ptr += strlen("sysrq"); while(isspace(*ptr)) ptr++; - mconsole_reply(req, "", 0, 0); - handle_sysrq(*ptr, &current->thread.regs, NULL); + /* With 'b', the system will shut down without a chance to reply, + * so in this case, we reply first. + */ + if(*ptr == 'b') + mconsole_reply(req, "", 0, 0); + + with_console(req, sysrq_proc, ptr); } #else void mconsole_sysrq(struct mc_request *req) @@ -481,6 +561,14 @@ void mconsole_sysrq(struct mc_request *req) } #endif +static void stack_proc(void *arg) +{ + struct task_struct *from = current, *to = arg; + + to->thread.saved_task = from; + switch_to(from, to, from); +} + /* Mconsole stack trace * Added by Allan Graves, Jeff Dike * Dumps a stack's registers to the linux console. */ void do_stack(struct mc_request *req) { - char *ptr = req->request.data; - int pid_requested= -1; - struct task_struct *from = NULL; + char *ptr = req->request.data; + int pid_requested= -1; + struct task_struct *from = NULL; struct task_struct *to = NULL; - /* Would be nice: - * 1) Send showregs output to mconsole. + /* Would be nice: + * 1) Send showregs output to mconsole. * 2) Add a way to stack dump all pids. */ - ptr += strlen("stack"); - while(isspace(*ptr)) ptr++; - - /* Should really check for multiple pids or reject bad args here */ - /* What do the arguments in mconsole_reply mean? */ - if(sscanf(ptr, "%d", &pid_requested) == 0){ - mconsole_reply(req, "Please specify a pid", 1, 0); - return; - } + ptr += strlen("stack"); + while(isspace(*ptr)) ptr++; - from = current; - to = find_task_by_pid(pid_requested); + /* Should really check for multiple pids or reject bad args here */ + /* What do the arguments in mconsole_reply mean?
*/ + if(sscanf(ptr, "%d", &pid_requested) == 0){ + mconsole_reply(req, "Please specify a pid", 1, 0); + return; + } - if((to == NULL) || (pid_requested == 0)) { - mconsole_reply(req, "Couldn't find that pid", 1, 0); - return; - } - to->thread.saved_task = current; + from = current; - switch_to(from, to, from); - mconsole_reply(req, "Stack Dumped to console and message log", 0, 0); + to = find_task_by_pid(pid_requested); + if((to == NULL) || (pid_requested == 0)) { + mconsole_reply(req, "Couldn't find that pid", 1, 0); + return; + } + with_console(req, stack_proc, to); } void mconsole_stack(struct mc_request *req) @@ -534,9 +619,9 @@ void mconsole_stack(struct mc_request *req) /* Changed by mconsole_setup, which is __setup, and called before SMP is * active. */ -static char *notify_socket = NULL; +static char *notify_socket = NULL; -int mconsole_init(void) +static int mconsole_init(void) { /* long to avoid size mismatch warnings from gcc */ long sock; @@ -563,16 +648,16 @@ int mconsole_init(void) } if(notify_socket != NULL){ - notify_socket = uml_strdup(notify_socket); + notify_socket = kstrdup(notify_socket, GFP_KERNEL); if(notify_socket != NULL) mconsole_notify(notify_socket, MCONSOLE_SOCKET, - mconsole_socket_name, + mconsole_socket_name, strlen(mconsole_socket_name) + 1); else printk(KERN_ERR "mconsole_setup failed to strdup " "string\n"); } - printk("mconsole (version %d) initialized on %s\n", + printk("mconsole (version %d) initialized on %s\n", MCONSOLE_VERSION, mconsole_socket_name); return(0); } @@ -585,7 +670,7 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer, char *buf; buf = kmalloc(count + 1, GFP_KERNEL); - if(buf == NULL) + if(buf == NULL) return(-ENOMEM); if(copy_from_user(buf, buffer, count)){ @@ -661,7 +746,7 @@ static int notify_panic(struct notifier_block *self, unsigned long unused1, if(notify_socket == NULL) return(0); - mconsole_notify(notify_socket, MCONSOLE_PANIC, message, + mconsole_notify(notify_socket, MCONSOLE_PANIC, message, strlen(message) + 1); return(0); } @@ -686,14 +771,3 @@ char *mconsole_notify_socket(void) } EXPORT_SYMBOL(mconsole_notify_socket); - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
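The with_console() mechanism just used by both mconsole_sysrq() and do_stack() generalizes: while the temporary entry sits on the 'clients' list, every printk() issued by the callback lands in console_buf via console_write(), and whatever remains when the callback returns is flushed to the requesting client. Wiring a hypothetical new command into it would look like this (version_proc and mconsole_kernel_version are illustrative names, not part of this patch):

	static void version_proc(void *unused)
	{
		/* Anything printk()ed here is captured by console_write()
		 * above and relayed to the mconsole client. */
		printk("%s %s\n", system_utsname.sysname,
		       system_utsname.release);
	}

	void mconsole_kernel_version(struct mc_request *req)
	{
		with_console(req, version_proc, NULL);
	}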
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c index 310c1f8..4b109fe 100644 --- a/arch/um/drivers/mconsole_user.c +++ b/arch/um/drivers/mconsole_user.c @@ -122,12 +122,12 @@ int mconsole_get_request(int fd, struct mc_request *req) return(1); } -int mconsole_reply(struct mc_request *req, char *str, int err, int more) +int mconsole_reply_len(struct mc_request *req, const char *str, int total, + int err, int more) { struct mconsole_reply reply; - int total, len, n; + int len, n; - total = strlen(str); do { reply.err = err; @@ -155,6 +155,12 @@ int mconsole_reply(struct mc_request *req, char *str, int err, int more) return(0); } +int mconsole_reply(struct mc_request *req, const char *str, int err, int more) +{ + return mconsole_reply_len(req, str, strlen(str), err, more); +} + + int mconsole_unlink_socket(void) { unlink(mconsole_socket_name); diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 84c73a3..fb1f9fb 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -34,7 +34,7 @@ #define DRIVER_NAME "uml-netdev" static DEFINE_SPINLOCK(opened_lock); -LIST_HEAD(opened); +static LIST_HEAD(opened); static int uml_net_rx(struct net_device *dev) { @@ -150,6 +150,7 @@ static int uml_net_close(struct net_device *dev) if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user); lp->fd = -1; + list_del(&lp->list); spin_unlock(&lp->lock); return 0; @@ -266,7 +267,7 @@ void uml_net_user_timer_expire(unsigned long _conn) } static DEFINE_SPINLOCK(devices_lock); -static struct list_head devices = LIST_HEAD_INIT(devices); +static LIST_HEAD(devices); static struct platform_driver uml_net_driver = { .driver = { @@ -586,7 +587,7 @@ static int net_config(char *str) err = eth_parse(str, &n, &str); if(err) return(err); - str = uml_strdup(str); + str = kstrdup(str, GFP_KERNEL); if(str == NULL){ printk(KERN_ERR "net_config failed to strdup string\n"); return(-1); @@ -715,6 +716,7 @@ static void close_devices(void) list_for_each(ele, &opened){ lp = list_entry(ele, struct uml_net_private, list); + free_irq(lp->dev->irq, lp->dev); if((lp->close != NULL) && (lp->fd >= 0)) (*lp->close)(lp->fd, &lp->user); if(lp->remove != NULL) (*lp->remove)(&lp->user); diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index 62e04ec..a32ef55 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c @@ -69,7 +69,7 @@ static struct line_driver driver = { .name = "ssl", .config = ssl_config, .get_config = ssl_get_config, - .id = line_id, + .id = line_id, .remove = ssl_remove, }, }; @@ -84,26 +84,23 @@ static struct lines lines = LINES_INIT(NR_PORTS); static int ssl_config(char *str) { - return(line_config(serial_lines, - sizeof(serial_lines)/sizeof(serial_lines[0]), str)); + return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts); } static int ssl_get_config(char *dev, char *str, int size, char **error_out) { - return(line_get_config(dev, serial_lines, - sizeof(serial_lines)/sizeof(serial_lines[0]), - str, size, error_out)); + return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str, + size, error_out); } static int ssl_remove(int n) { - return line_remove(serial_lines, - sizeof(serial_lines)/sizeof(serial_lines[0]), n); + return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n); } int ssl_open(struct tty_struct *tty, struct file *filp) { - return line_open(serial_lines, tty, &opts); + 
return line_open(serial_lines, tty); } #if 0 @@ -112,16 +109,6 @@ static void ssl_flush_buffer(struct tty_struct *tty) return; } -static void ssl_throttle(struct tty_struct * tty) -{ - printk(KERN_ERR "Someone should implement ssl_throttle\n"); -} - -static void ssl_unthrottle(struct tty_struct * tty) -{ - printk(KERN_ERR "Someone should implement ssl_unthrottle\n"); -} - static void ssl_stop(struct tty_struct *tty) { printk(KERN_ERR "Someone should implement ssl_stop\n"); @@ -148,9 +135,9 @@ static struct tty_operations ssl_ops = { .flush_chars = line_flush_chars, .set_termios = line_set_termios, .ioctl = line_ioctl, + .throttle = line_throttle, + .unthrottle = line_unthrottle, #if 0 - .throttle = ssl_throttle, - .unthrottle = ssl_unthrottle, .stop = ssl_stop, .start = ssl_start, .hangup = ssl_hangup, @@ -183,7 +170,7 @@ static int ssl_console_setup(struct console *co, char *options) { struct line *line = &serial_lines[co->index]; - return console_open_chan(line,co,&opts); + return console_open_chan(line, co, &opts); } static struct console ssl_cons = { @@ -199,12 +186,13 @@ int ssl_init(void) { char *new_title; - printk(KERN_INFO "Initializing software serial port version %d\n", + printk(KERN_INFO "Initializing software serial port version %d\n", ssl_version); ssl_driver = line_register_devfs(&lines, &driver, &ssl_ops, - serial_lines, ARRAY_SIZE(serial_lines)); + serial_lines, + ARRAY_SIZE(serial_lines)); - lines_init(serial_lines, sizeof(serial_lines)/sizeof(serial_lines[0])); + lines_init(serial_lines, ARRAY_SIZE(serial_lines), &opts); new_title = add_xterm_umid(opts.xterm_title); if (new_title != NULL) @@ -212,7 +200,7 @@ int ssl_init(void) ssl_init_done = 1; register_console(&ssl_cons); - return(0); + return 0; } late_initcall(ssl_init); @@ -220,16 +208,13 @@ static void ssl_exit(void) { if (!ssl_init_done) return; - close_lines(serial_lines, - sizeof(serial_lines)/sizeof(serial_lines[0])); + close_lines(serial_lines, ARRAY_SIZE(serial_lines)); } __uml_exitcall(ssl_exit); static int ssl_chan_setup(char *str) { - return(line_setup(serial_lines, - sizeof(serial_lines)/sizeof(serial_lines[0]), - str, 1)); + return line_setup(serial_lines, ARRAY_SIZE(serial_lines), str); } __setup("ssl", ssl_chan_setup); diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index 005aa63..61db8b2 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c @@ -75,7 +75,7 @@ static struct line_driver driver = { .name = "con", .config = con_config, .get_config = con_get_config, - .id = line_id, + .id = line_id, .remove = con_remove, }, }; @@ -86,28 +86,27 @@ static struct lines console_lines = LINES_INIT(MAX_TTYS); * individual elements are protected by individual semaphores. */ struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver), - [ 1 ... MAX_TTYS - 1 ] = + [ 1 ... 
MAX_TTYS - 1 ] = LINE_INIT(CONFIG_CON_CHAN, &driver) }; static int con_config(char *str) { - return(line_config(vts, sizeof(vts)/sizeof(vts[0]), str)); + return line_config(vts, ARRAY_SIZE(vts), str, &opts); } static int con_get_config(char *dev, char *str, int size, char **error_out) { - return(line_get_config(dev, vts, sizeof(vts)/sizeof(vts[0]), str, - size, error_out)); + return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out); } static int con_remove(int n) { - return line_remove(vts, sizeof(vts)/sizeof(vts[0]), n); + return line_remove(vts, ARRAY_SIZE(vts), n); } static int con_open(struct tty_struct *tty, struct file *filp) { - return line_open(vts, tty, &opts); + return line_open(vts, tty); } static int con_init_done = 0; @@ -117,16 +116,18 @@ static struct tty_operations console_ops = { .close = line_close, .write = line_write, .put_char = line_put_char, - .write_room = line_write_room, + .write_room = line_write_room, .chars_in_buffer = line_chars_in_buffer, .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, .ioctl = line_ioctl, + .throttle = line_throttle, + .unthrottle = line_unthrottle, }; static void uml_console_write(struct console *console, const char *string, - unsigned len) + unsigned len) { struct line *line = &vts[console->index]; unsigned long flags; @@ -146,7 +147,7 @@ static int uml_console_setup(struct console *co, char *options) { struct line *line = &vts[co->index]; - return console_open_chan(line,co,&opts); + return console_open_chan(line, co, &opts); } static struct console stdiocons = { @@ -156,7 +157,7 @@ static struct console stdiocons = { .setup = uml_console_setup, .flags = CON_PRINTBUFFER, .index = -1, - .data = &vts, + .data = &vts, }; int stdio_init(void) @@ -166,11 +167,11 @@ int stdio_init(void) console_driver = line_register_devfs(&console_lines, &driver, &console_ops, vts, ARRAY_SIZE(vts)); - if (NULL == console_driver) + if (console_driver == NULL) return -1; printk(KERN_INFO "Initialized stdio console driver\n"); - lines_init(vts, sizeof(vts)/sizeof(vts[0])); + lines_init(vts, ARRAY_SIZE(vts), &opts); new_title = add_xterm_umid(opts.xterm_title); if(new_title != NULL) @@ -178,7 +179,7 @@ int stdio_init(void) con_init_done = 1; register_console(&stdiocons); - return(0); + return 0; } late_initcall(stdio_init); @@ -186,13 +187,13 @@ static void console_exit(void) { if (!con_init_done) return; - close_lines(vts, sizeof(vts)/sizeof(vts[0])); + close_lines(vts, ARRAY_SIZE(vts)); } __uml_exitcall(console_exit); static int console_chan_setup(char *str) { - return(line_setup(vts, sizeof(vts)/sizeof(vts[0]), str, 1)); + return line_setup(vts, ARRAY_SIZE(vts), str); } __setup("con", console_chan_setup); __channel_help(console_chan_setup, "con"); diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 9389891..73f9652 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -706,7 +706,7 @@ static int ubd_config(char *str) { int n, err; - str = uml_strdup(str); + str = kstrdup(str, GFP_KERNEL); if(str == NULL){ printk(KERN_ERR "ubd_config failed to strdup string\n"); return(1); @@ -1387,15 +1387,6 @@ int io_thread(void *arg) printk("io_thread - write failed, fd = %d, err = %d\n", kernel_fd, -n); } -} -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
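The repeated sizeof(vts)/sizeof(vts[0]) spellings being swept away above are replaced by the kernel's ARRAY_SIZE() macro, which at the time was exactly the same open-coded division, just harder to get wrong when an array name changes. A standalone illustration:

	#include <stdio.h>

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	int main(void)
	{
		int vts[16];

		/* Prints 16; identical to sizeof(vts)/sizeof(vts[0]),
		 * computed entirely at compile time. */
		printf("%zu\n", ARRAY_SIZE(vts));
		return 0;
	}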
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ + return 0; +} diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h index da9a671..1bb5e9d 100644 --- a/arch/um/include/chan_kern.h +++ b/arch/um/include/chan_kern.h @@ -14,21 +14,23 @@ struct chan { struct list_head list; + struct list_head free_list; + struct line *line; char *dev; unsigned int primary:1; unsigned int input:1; unsigned int output:1; unsigned int opened:1; + unsigned int enabled:1; int fd; - enum chan_init_pri pri; struct chan_ops *ops; void *data; }; extern void chan_interrupt(struct list_head *chans, struct work_struct *task, struct tty_struct *tty, int irq); -extern int parse_chan_pair(char *str, struct list_head *chans, int pri, - int device, struct chan_opts *opts); +extern int parse_chan_pair(char *str, struct line *line, int device, + struct chan_opts *opts); extern int open_chan(struct list_head *chans); extern int write_chan(struct list_head *chans, const char *buf, int len, int write_irq); @@ -36,9 +38,11 @@ extern int console_write_chan(struct list_head *chans, const char *buf, int len); extern int console_open_chan(struct line *line, struct console *co, struct chan_opts *opts); -extern void close_chan(struct list_head *chans); +extern void deactivate_chan(struct list_head *chans, int irq); +extern void reactivate_chan(struct list_head *chans, int irq); extern void chan_enable_winch(struct list_head *chans, struct tty_struct *tty); -extern void enable_chan(struct list_head *chans, struct tty_struct *tty); +extern void enable_chan(struct line *line); +extern void close_chan(struct list_head *chans, int delay_free_irq); extern int chan_window_size(struct list_head *chans, unsigned short *rows_out, unsigned short *cols_out); @@ -47,14 +51,3 @@ extern int chan_config_string(struct list_head *chans, char *str, int size, char **error_out); #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/choose-mode.h b/arch/um/include/choose-mode.h index f25fa83..b87b36a 100644 --- a/arch/um/include/choose-mode.h +++ b/arch/um/include/choose-mode.h @@ -23,6 +23,9 @@ static inline void *__choose_mode(void *tt, void *skas) { #elif defined(UML_CONFIG_MODE_TT) #define CHOOSE_MODE(tt, skas) (tt) + +#else +#error CONFIG_MODE_SKAS and CONFIG_MODE_TT are both disabled #endif #define CHOOSE_MODE_PROC(tt, skas, args...) \ diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h index f724b71..b61deb8 100644 --- a/arch/um/include/irq_user.h +++ b/arch/um/include/irq_user.h @@ -18,19 +18,8 @@ extern int deactivate_all_fds(void); extern void forward_interrupts(int pid); extern void init_irq_signals(int on_sigstack); extern void forward_ipi(int fd, int pid); -extern void free_irq_later(int irq, void *dev_id); extern int activate_ipi(int fd, int pid); extern unsigned long irq_lock(void); extern void irq_unlock(unsigned long flags); -#endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ +#endif diff --git a/arch/um/include/kern.h b/arch/um/include/kern.h index 1e31707..7d223be 100644 --- a/arch/um/include/kern.h +++ b/arch/um/include/kern.h @@ -17,7 +17,7 @@ extern int errno; extern int clone(int (*proc)(void *), void *sp, int flags, void *data); extern int sleep(int); -extern int printf(char *fmt, ...); +extern int printf(const char *fmt, ...); extern char *strerror(int errnum); extern char *ptsname(int __fd); extern int munmap(void *, int); @@ -35,15 +35,6 @@ extern int read(unsigned int, char *, int); extern int pipe(int *); extern int sched_yield(void); extern int ptrace(int op, int pid, long addr, long data); + #endif -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/line.h b/arch/um/include/line.h index 5323d22..6f4d680 100644 --- a/arch/um/include/line.h +++ b/arch/um/include/line.h @@ -32,11 +32,13 @@ struct line_driver { }; struct line { + struct tty_struct *tty; char *init_str; int init_pri; struct list_head chan_list; int valid; int count; + int throttled; /*This lock is actually, mostly, local to*/ spinlock_t lock; @@ -58,14 +60,15 @@ struct line { #define LINE_INIT(str, d) \ { init_str : str, \ init_pri : INIT_STATIC, \ - chan_list : { }, \ valid : 1, \ + throttled : 0, \ + lock : SPIN_LOCK_UNLOCKED, \ buffer : NULL, \ head : NULL, \ tail : NULL, \ sigio : 0, \ - driver : d, \ - have_irq : 0 } + driver : d, \ + have_irq : 0 } struct lines { int num; @@ -74,11 +77,11 @@ struct lines { #define LINES_INIT(n) { num : n } extern void line_close(struct tty_struct *tty, struct file * filp); -extern int line_open(struct line *lines, struct tty_struct *tty, - struct chan_opts *opts); -extern int line_setup(struct line *lines, unsigned int sizeof_lines, char *init, - int all_allowed); -extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len); +extern int line_open(struct line *lines, struct tty_struct *tty); +extern int line_setup(struct line *lines, unsigned int sizeof_lines, + char *init); +extern int line_write(struct tty_struct *tty, const unsigned char *buf, + int len); extern void line_put_char(struct tty_struct *tty, unsigned char ch); extern void line_set_termios(struct tty_struct *tty, struct termios * old); extern int line_chars_in_buffer(struct tty_struct *tty); @@ -87,23 +90,27 @@ extern void line_flush_chars(struct tty_struct *tty); extern int line_write_room(struct tty_struct *tty); extern int line_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg); +extern void line_throttle(struct tty_struct *tty); +extern void line_unthrottle(struct tty_struct *tty); extern char *add_xterm_umid(char *base); -extern int line_setup_irq(int fd, int input, int output, struct tty_struct *tty); +extern int line_setup_irq(int fd, int input, int output, struct line *line, + void *data); extern void line_close_chan(struct line *line); -extern void line_disable(struct tty_struct *tty, int current_irq); -extern struct tty_driver * line_register_devfs(struct lines *set, - struct line_driver *line_driver, +extern struct tty_driver * 
line_register_devfs(struct lines *set, + struct line_driver *line_driver, struct tty_operations *driver, struct line *lines, int nlines); -extern void lines_init(struct line *lines, int nlines); +extern void lines_init(struct line *lines, int nlines, struct chan_opts *opts); extern void close_lines(struct line *lines, int nlines); -extern int line_config(struct line *lines, unsigned int sizeof_lines, char *str); +extern int line_config(struct line *lines, unsigned int sizeof_lines, + char *str, struct chan_opts *opts); extern int line_id(char **str, int *start_out, int *end_out); extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n); -extern int line_get_config(char *dev, struct line *lines, unsigned int sizeof_lines, char *str, +extern int line_get_config(char *dev, struct line *lines, + unsigned int sizeof_lines, char *str, int size, char **error_out); #endif diff --git a/arch/um/include/mconsole.h b/arch/um/include/mconsole.h index b1b512f..58f67d3 100644 --- a/arch/um/include/mconsole.h +++ b/arch/um/include/mconsole.h @@ -32,7 +32,7 @@ struct mconsole_reply { struct mconsole_notify { u32 magic; - u32 version; + u32 version; enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG, MCONSOLE_USER_NOTIFY } type; u32 len; @@ -66,7 +66,9 @@ struct mc_request extern char mconsole_socket_name[]; extern int mconsole_unlink_socket(void); -extern int mconsole_reply(struct mc_request *req, char *reply, int err, +extern int mconsole_reply_len(struct mc_request *req, const char *reply, + int len, int err, int more); +extern int mconsole_reply(struct mc_request *req, const char *str, int err, int more); extern void mconsole_version(struct mc_request *req); @@ -84,7 +86,7 @@ extern void mconsole_proc(struct mc_request *req); extern void mconsole_stack(struct mc_request *req); extern int mconsole_get_request(int fd, struct mc_request *req); -extern int mconsole_notify(char *sock_name, int type, const void *data, +extern int mconsole_notify(char *sock_name, int type, const void *data, int len); extern char *mconsole_notify_socket(void); extern void lock_notify(void); diff --git a/arch/um/include/os.h b/arch/um/include/os.h index 2cccfa5..c279ee6 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h @@ -213,15 +213,10 @@ extern int run_helper_thread(int (*proc)(void *), void *arg, int stack_order); extern int helper_wait(int pid); -#endif +/* umid.c */ -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
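The mconsole_reply_len() prototype above is the binary-safe generalization of mconsole_reply(): the caller passes an explicit length, and the implementation (see mconsole_user.c earlier in this patch) slices the payload into reply-sized pieces, setting the 'more' flag on every piece but the last so the client can reassemble output longer than one datagram. An outline of that send loop; field names follow struct mconsole_reply in mconsole.h, the socket write is elided, and this is a sketch rather than the exact implementation:

	int off = 0;

	do {
		struct mconsole_reply reply;
		int len = total - off;

		if (len > (int) sizeof(reply.data))
			len = sizeof(reply.data);

		reply.err = err;
		reply.more = more || (off + len < total);  /* 0 only on final piece */
		memcpy(reply.data, str + off, len);
		reply.len = len;
		/* ... send 'reply' back over the mconsole socket here ... */
		off += len;
	} while (off < total);	/* do/while: a zero-length payload still replies */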
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ +extern int umid_file_name(char *name, char *buf, int len); +extern int set_umid(char *name); +extern char *get_umid(void); + +#endif diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h index bb505e0..b998400 100644 --- a/arch/um/include/user_util.h +++ b/arch/um/include/user_util.h @@ -64,7 +64,6 @@ extern void setup_machinename(char *machine_out); extern void setup_hostinfo(void); extern void do_exec(int old_pid, int new_pid); extern void tracer_panic(char *msg, ...); -extern char *get_umid(int only_if_set); extern void do_longjmp(void *p, int val); extern int detach(int pid, int sig); extern int attach(int pid); diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 3de9d21..6f77005 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -10,8 +10,8 @@ obj-y = config.o exec_kern.o exitcode.o \ init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \ process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \ signal_kern.o signal_user.o smp.o syscall_kern.o sysrq.o time.o \ - time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o \ - umid.o user_util.o + time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o umid.o \ + user_util.o obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o obj-$(CONFIG_GPROF) += gprof_syms.o @@ -24,7 +24,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/ user-objs-$(CONFIG_TTY_LOG) += tty_log.o -USER_OBJS := $(user-objs-y) config.o time.o tty_log.o umid.o user_util.o +USER_OBJS := $(user-objs-y) config.o time.o tty_log.o user_util.o include arch/um/scripts/Makefile.rules diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c index c3ccaf2..50a2aa3 100644 --- a/arch/um/kernel/irq_user.c +++ b/arch/um/kernel/irq_user.c @@ -29,7 +29,6 @@ struct irq_fd { int pid; int events; int current_events; - int freed; }; static struct irq_fd *active_fds = NULL; @@ -41,9 +40,11 @@ static int pollfds_size = 0; extern int io_count, intr_count; +extern void free_irqs(void); + void sigio_handler(int sig, union uml_pt_regs *regs) { - struct irq_fd *irq_fd, *next; + struct irq_fd *irq_fd; int i, n; if(smp_sigio_handler()) return; @@ -66,29 +67,15 @@ void sigio_handler(int sig, union uml_pt_regs *regs) irq_fd = irq_fd->next; } - for(irq_fd = active_fds; irq_fd != NULL; irq_fd = next){ - next = irq_fd->next; + for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ if(irq_fd->current_events != 0){ irq_fd->current_events = 0; do_IRQ(irq_fd->irq, regs); - - /* This is here because the next irq may be - * freed in the handler. If a console goes - * away, both the read and write irqs will be - * freed. After do_IRQ, ->next will point to - * a good IRQ. - * Irqs can't be freed inside their handlers, - * so the next best thing is to have them - * marked as needing freeing, so that they - * can be freed here. - */ - next = irq_fd->next; - if(irq_fd->freed){ - free_irq(irq_fd->irq, irq_fd->id); - } } } } + + free_irqs(); } int activate_ipi(int fd, int pid) @@ -136,8 +123,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id) .irq = irq, .pid = pid, .events = events, - .current_events = 0, - .freed = 0 } ); + .current_events = 0 } ); /* Critical section - locked by a spinlock because this stuff can * be changed from interrupt handlers. 
The stuff above is done @@ -313,26 +299,6 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out) return(irq); } -void free_irq_later(int irq, void *dev_id) -{ - struct irq_fd *irq_fd; - unsigned long flags; - - flags = irq_lock(); - for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ - if((irq_fd->irq == irq) && (irq_fd->id == dev_id)) - break; - } - if(irq_fd == NULL){ - printk("free_irq_later found no irq, irq = %d, " - "dev_id = 0x%p\n", irq, dev_id); - goto out; - } - irq_fd->freed = 1; - out: - irq_unlock(flags); -} - void reactivate_fd(int fd, int irqnum) { struct irq_fd *irq; diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c index 34b54a3..651abf2 100644 --- a/arch/um/kernel/process_kern.c +++ b/arch/um/kernel/process_kern.c @@ -324,10 +324,6 @@ int user_context(unsigned long sp) return(stack != (unsigned long) current_thread); } -extern void remove_umid_dir(void); - -__uml_exitcall(remove_umid_dir); - extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end; void do_uml_exitcalls(void) diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c index 48b1f64..62e5cfd 100644 --- a/arch/um/kernel/sigio_user.c +++ b/arch/um/kernel/sigio_user.c @@ -216,6 +216,8 @@ static int write_sigio_thread(void *unused) "err = %d\n", -n); } } + + return 0; } static int need_poll(int n) diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 142a949..26626b2 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -146,8 +146,8 @@ void set_cmdline(char *cmd) if(CHOOSE_MODE(honeypot, 0)) return; - umid = get_umid(1); - if(umid != NULL){ + umid = get_umid(); + if(*umid != '\0'){ snprintf(argv1_begin, (argv1_end - argv1_begin) * sizeof(*ptr), "(%s) ", umid); diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c index 0b21d59..4eaee82 100644 --- a/arch/um/kernel/umid.c +++ b/arch/um/kernel/umid.c @@ -3,61 +3,30 @@ * Licensed under the GPL */ -#include <stdio.h> -#include <unistd.h> -#include <errno.h> -#include <string.h> -#include <stdlib.h> -#include <dirent.h> -#include <signal.h> -#include <sys/stat.h> -#include <sys/param.h> -#include "user.h" -#include "umid.h" +#include "asm/errno.h" #include "init.h" #include "os.h" -#include "user_util.h" -#include "choose-mode.h" +#include "kern.h" +#include "linux/kernel.h" -#define UMID_LEN 64 -#define UML_DIR "~/.uml/" - -/* Changed by set_umid and make_umid, which are run early in boot */ -static char umid[UMID_LEN] = { 0 }; - -/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */ -static char *uml_dir = UML_DIR; - -/* Changed by set_umid */ -static int umid_is_random = 1; +/* Changed by set_umid_arg */ static int umid_inited = 0; -/* Have we created the files? Should we remove them? 
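Note the changed get_umid() contract visible in um_arch.c above: the old get_umid(1) returned NULL when no umid had been chosen, while the new argument-less version always returns a static buffer that is simply empty until set_umid() or auto-generation fills it. Callers now test the first character instead of the pointer, as sketched (the buffer is owned by umid.c and must not be freed):

	char *umid = get_umid();	/* never NULL under the new contract */

	if (*umid != '\0')
		printf("umid is '%s'\n", umid);	/* set via umid= or auto-generated */
	else
		printf("umid not set yet\n");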
*/ -static int umid_owned = 0; -static int make_umid(int (*printer)(const char *fmt, ...)); - -static int __init set_umid(char *name, int is_random, - int (*printer)(const char *fmt, ...)) +static int __init set_umid_arg(char *name, int *add) { - if(umid_inited){ - (*printer)("Unique machine name can't be set twice\n"); - return(-1); - } + int err; - if(strlen(name) > UMID_LEN - 1) - (*printer)("Unique machine name is being truncated to %d " - "characters\n", UMID_LEN); - strlcpy(umid, name, sizeof(umid)); + if(umid_inited) + return 0; - umid_is_random = is_random; - umid_inited = 1; - return 0; -} - -static int __init set_umid_arg(char *name, int *add) -{ *add = 0; - return(set_umid(name, 0, printf)); + err = set_umid(name); + if(err == -EEXIST) + printf("umid '%s' already in use\n", name); + else if(!err) + umid_inited = 1; + + return 0; } __uml_setup("umid=", set_umid_arg, @@ -66,265 +35,3 @@ __uml_setup("umid=", set_umid_arg, " is used for naming the pid file and management console socket.\n\n" ); -int __init umid_file_name(char *name, char *buf, int len) -{ - int n; - - if(!umid_inited && make_umid(printk)) return(-1); - - n = strlen(uml_dir) + strlen(umid) + strlen(name) + 1; - if(n > len){ - printk("umid_file_name : buffer too short\n"); - return(-1); - } - - sprintf(buf, "%s%s/%s", uml_dir, umid, name); - return(0); -} - -extern int tracing_pid; - -static void __init create_pid_file(void) -{ - char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; - char pid[sizeof("nnnnn\0")]; - int fd, n; - - if(umid_file_name("pid", file, sizeof(file))) - return; - - fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))), - 0644); - if(fd < 0){ - printf("Open of machine pid file \"%s\" failed: %s\n", - file, strerror(-fd)); - return; - } - - sprintf(pid, "%d\n", os_getpid()); - n = os_write_file(fd, pid, strlen(pid)); - if(n != strlen(pid)) - printf("Write of pid file failed - err = %d\n", -n); - os_close_file(fd); -} - -static int actually_do_remove(char *dir) -{ - DIR *directory; - struct dirent *ent; - int len; - char file[256]; - - directory = opendir(dir); - if(directory == NULL){ - printk("actually_do_remove : couldn't open directory '%s', " - "errno = %d\n", dir, errno); - return(1); - } - while((ent = readdir(directory)) != NULL){ - if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) - continue; - len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1; - if(len > sizeof(file)){ - printk("Not deleting '%s' from '%s' - name too long\n", - ent->d_name, dir); - continue; - } - sprintf(file, "%s/%s", dir, ent->d_name); - if(unlink(file) < 0){ - printk("actually_do_remove : couldn't remove '%s' " - "from '%s', errno = %d\n", ent->d_name, dir, - errno); - return(1); - } - } - if(rmdir(dir) < 0){ - printk("actually_do_remove : couldn't rmdir '%s', " - "errno = %d\n", dir, errno); - return(1); - } - return(0); -} - -void remove_umid_dir(void) -{ - char dir[strlen(uml_dir) + UMID_LEN + 1]; - if (!umid_owned) - return; - - sprintf(dir, "%s%s", uml_dir, umid); - actually_do_remove(dir); -} - -char *get_umid(int only_if_set) -{ - if(only_if_set && umid_is_random) - return NULL; - return umid; -} - -static int not_dead_yet(char *dir) -{ - char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; - char pid[sizeof("nnnnn\0")], *end; - int dead, fd, p, n; - - sprintf(file, "%s/pid", dir); - dead = 0; - fd = os_open_file(file, of_read(OPENFLAGS()), 0); - if(fd < 0){ - if(fd != -ENOENT){ - printk("not_dead_yet : couldn't open pid file '%s', " - "err = %d\n", file, -fd); - 
return(1); - } - dead = 1; - } - if(fd > 0){ - n = os_read_file(fd, pid, sizeof(pid)); - if(n < 0){ - printk("not_dead_yet : couldn't read pid file '%s', " - "err = %d\n", file, -n); - return(1); - } - p = strtoul(pid, &end, 0); - if(end == pid){ - printk("not_dead_yet : couldn't parse pid file '%s', " - "errno = %d\n", file, errno); - dead = 1; - } - if(((kill(p, 0) < 0) && (errno == ESRCH)) || - (p == CHOOSE_MODE(tracing_pid, os_getpid()))) - dead = 1; - } - if(!dead) - return(1); - return(actually_do_remove(dir)); -} - -static int __init set_uml_dir(char *name, int *add) -{ - if((strlen(name) > 0) && (name[strlen(name) - 1] != '/')){ - uml_dir = malloc(strlen(name) + 2); - if(uml_dir == NULL){ - printf("Failed to malloc uml_dir - error = %d\n", - errno); - uml_dir = name; - /* Return 0 here because do_initcalls doesn't look at - * the return value. - */ - return(0); - } - sprintf(uml_dir, "%s/", name); - } - else uml_dir = name; - return(0); -} - -static int __init make_uml_dir(void) -{ - char dir[MAXPATHLEN + 1] = { '\0' }; - int len; - - if(*uml_dir == '~'){ - char *home = getenv("HOME"); - - if(home == NULL){ - printf("make_uml_dir : no value in environment for " - "$HOME\n"); - exit(1); - } - strlcpy(dir, home, sizeof(dir)); - uml_dir++; - } - strlcat(dir, uml_dir, sizeof(dir)); - len = strlen(dir); - if (len > 0 && dir[len - 1] != '/') - strlcat(dir, "/", sizeof(dir)); - - uml_dir = malloc(strlen(dir) + 1); - if (uml_dir == NULL) { - printf("make_uml_dir : malloc failed, errno = %d\n", errno); - exit(1); - } - strcpy(uml_dir, dir); - - if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){ - printf("Failed to mkdir %s: %s\n", uml_dir, strerror(errno)); - return(-1); - } - return 0; -} - -static int __init make_umid(int (*printer)(const char *fmt, ...)) -{ - int fd, err; - char tmp[strlen(uml_dir) + UMID_LEN + 1]; - - strlcpy(tmp, uml_dir, sizeof(tmp)); - - if(!umid_inited){ - strcat(tmp, "XXXXXX"); - fd = mkstemp(tmp); - if(fd < 0){ - (*printer)("make_umid - mkstemp(%s) failed: %s\n", - tmp,strerror(errno)); - return(1); - } - - os_close_file(fd); - /* There's a nice tiny little race between this unlink and - * the mkdir below. It'd be nice if there were a mkstemp - * for directories. - */ - unlink(tmp); - set_umid(&tmp[strlen(uml_dir)], 1, printer); - } - - sprintf(tmp, "%s%s", uml_dir, umid); - - err = mkdir(tmp, 0777); - if(err < 0){ - if(errno == EEXIST){ - if(not_dead_yet(tmp)){ - (*printer)("umid '%s' is in use\n", umid); - umid_owned = 0; - return(-1); - } - err = mkdir(tmp, 0777); - } - } - if(err < 0){ - (*printer)("Failed to create %s - errno = %d\n", umid, errno); - return(-1); - } - - umid_owned = 1; - return 0; -} - -__uml_setup("uml_dir=", set_uml_dir, -"uml_dir=<directory>\n" -" The location to place the pid and umid files.\n\n" -); - -static int __init make_umid_setup(void) -{ - /* one function with the ordering we need ... */ - make_uml_dir(); - make_umid(printf); - create_pid_file(); - return 0; -} -__uml_postsetup(make_umid_setup); - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
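The deleted make_umid() comment above wished for "a mkstemp for directories"; glibc has one, mkdtemp(3), which creates the directory atomically and so avoids the unlink/mkdir window the comment apologizes for. A minimal userspace sketch (template path illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char tmpl[] = "/tmp/umlXXXXXX";	/* trailing XXXXXX is required */

		if (mkdtemp(tmpl) == NULL) {
			perror("mkdtemp");
			return 1;
		}
		printf("created %s\n", tmpl);	/* mode 0700, created atomically */
		return 0;
	}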
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile index b83ac8e..11e30b13 100644 --- a/arch/um/os-Linux/Makefile +++ b/arch/um/os-Linux/Makefile @@ -4,11 +4,11 @@ # obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ - start_up.o time.o tt.o tty.o uaccess.o user_syms.o drivers/ \ + start_up.o time.o tt.o tty.o uaccess.o umid.o user_syms.o drivers/ \ sys-$(SUBARCH)/ USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ - start_up.o time.o tt.o tty.o uaccess.o + start_up.o time.o tt.o tty.o uaccess.o umid.o elf_aux.o: $(ARCH_DIR)/kernel-offsets.h CFLAGS_elf_aux.o += -I$(objtree)/arch/um diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c index ffa759a..f897140 100644 --- a/arch/um/os-Linux/aio.c +++ b/arch/um/os-Linux/aio.c @@ -16,12 +16,12 @@ #include "mode.h" struct aio_thread_req { - enum aio_type type; - int io_fd; - unsigned long long offset; - char *buf; - int len; - struct aio_context *aio; + enum aio_type type; + int io_fd; + unsigned long long offset; + char *buf; + int len; + struct aio_context *aio; }; static int aio_req_fd_r = -1; @@ -38,18 +38,18 @@ static int aio_req_fd_w = -1; static long io_setup(int n, aio_context_t *ctxp) { - return syscall(__NR_io_setup, n, ctxp); + return syscall(__NR_io_setup, n, ctxp); } static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp) { - return syscall(__NR_io_submit, ctx, nr, iocbpp); + return syscall(__NR_io_submit, ctx, nr, iocbpp); } static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, - struct io_event *events, struct timespec *timeout) + struct io_event *events, struct timespec *timeout) { - return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout); + return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout); } #endif @@ -66,243 +66,245 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, */ static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf, - int len, unsigned long long offset, struct aio_context *aio) + int len, unsigned long long offset, struct aio_context *aio) { - struct iocb iocb, *iocbp = &iocb; - char c; - int err; - - iocb = ((struct iocb) { .aio_data = (unsigned long) aio, - .aio_reqprio = 0, - .aio_fildes = fd, - .aio_buf = (unsigned long) buf, - .aio_nbytes = len, - .aio_offset = offset, - .aio_reserved1 = 0, - .aio_reserved2 = 0, - .aio_reserved3 = 0 }); - - switch(type){ - case AIO_READ: - iocb.aio_lio_opcode = IOCB_CMD_PREAD; - err = io_submit(ctx, 1, &iocbp); - break; - case AIO_WRITE: - iocb.aio_lio_opcode = IOCB_CMD_PWRITE; - err = io_submit(ctx, 1, &iocbp); - break; - case AIO_MMAP: - iocb.aio_lio_opcode = IOCB_CMD_PREAD; - iocb.aio_buf = (unsigned long) &c; - iocb.aio_nbytes = sizeof(c); - err = io_submit(ctx, 1, &iocbp); - break; - default: - printk("Bogus op in do_aio - %d\n", type); - err = -EINVAL; - break; - } - - if(err > 0) - err = 0; + struct iocb iocb, *iocbp = &iocb; + char c; + int err; + + iocb = ((struct iocb) { .aio_data = (unsigned long) aio, + .aio_reqprio = 0, + .aio_fildes = fd, + .aio_buf = (unsigned long) buf, + .aio_nbytes = len, + .aio_offset = offset, + .aio_reserved1 = 0, + .aio_reserved2 = 0, + .aio_reserved3 = 0 }); + + switch(type){ + case AIO_READ: + iocb.aio_lio_opcode = IOCB_CMD_PREAD; + err = io_submit(ctx, 1, &iocbp); + break; + case AIO_WRITE: + iocb.aio_lio_opcode = 
IOCB_CMD_PWRITE; + err = io_submit(ctx, 1, &iocbp); + break; + case AIO_MMAP: + iocb.aio_lio_opcode = IOCB_CMD_PREAD; + iocb.aio_buf = (unsigned long) &c; + iocb.aio_nbytes = sizeof(c); + err = io_submit(ctx, 1, &iocbp); + break; + default: + printk("Bogus op in do_aio - %d\n", type); + err = -EINVAL; + break; + } + + if(err > 0) + err = 0; else err = -errno; - return err; + return err; } static aio_context_t ctx = 0; static int aio_thread(void *arg) { - struct aio_thread_reply reply; - struct io_event event; - int err, n, reply_fd; - - signal(SIGWINCH, SIG_IGN); - - while(1){ - n = io_getevents(ctx, 1, 1, &event, NULL); - if(n < 0){ - if(errno == EINTR) - continue; - printk("aio_thread - io_getevents failed, " - "errno = %d\n", errno); - } - else { - reply = ((struct aio_thread_reply) - { .data = (void *) (long) event.data, - .err = event.res }); + struct aio_thread_reply reply; + struct io_event event; + int err, n, reply_fd; + + signal(SIGWINCH, SIG_IGN); + + while(1){ + n = io_getevents(ctx, 1, 1, &event, NULL); + if(n < 0){ + if(errno == EINTR) + continue; + printk("aio_thread - io_getevents failed, " + "errno = %d\n", errno); + } + else { + reply = ((struct aio_thread_reply) + { .data = (void *) (long) event.data, + .err = event.res }); reply_fd = ((struct aio_context *) reply.data)->reply_fd; err = os_write_file(reply_fd, &reply, sizeof(reply)); - if(err != sizeof(reply)) + if(err != sizeof(reply)) printk("aio_thread - write failed, fd = %d, " - "err = %d\n", aio_req_fd_r, -err); - } - } - return 0; + "err = %d\n", aio_req_fd_r, -err); + } + } + return 0; } #endif static int do_not_aio(struct aio_thread_req *req) { - char c; - int err; - - switch(req->type){ - case AIO_READ: - err = os_seek_file(req->io_fd, req->offset); - if(err) - goto out; - - err = os_read_file(req->io_fd, req->buf, req->len); - break; - case AIO_WRITE: - err = os_seek_file(req->io_fd, req->offset); - if(err) - goto out; - - err = os_write_file(req->io_fd, req->buf, req->len); - break; - case AIO_MMAP: - err = os_seek_file(req->io_fd, req->offset); - if(err) - goto out; - - err = os_read_file(req->io_fd, &c, sizeof(c)); - break; - default: - printk("do_not_aio - bad request type : %d\n", req->type); - err = -EINVAL; - break; - } - - out: - return err; + char c; + int err; + + switch(req->type){ + case AIO_READ: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_read_file(req->io_fd, req->buf, req->len); + break; + case AIO_WRITE: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_write_file(req->io_fd, req->buf, req->len); + break; + case AIO_MMAP: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_read_file(req->io_fd, &c, sizeof(c)); + break; + default: + printk("do_not_aio - bad request type : %d\n", req->type); + err = -EINVAL; + break; + } + +out: + return err; } static int not_aio_thread(void *arg) { - struct aio_thread_req req; - struct aio_thread_reply reply; - int err; - - signal(SIGWINCH, SIG_IGN); - while(1){ - err = os_read_file(aio_req_fd_r, &req, sizeof(req)); - if(err != sizeof(req)){ - if(err < 0) - printk("not_aio_thread - read failed, " - "fd = %d, err = %d\n", aio_req_fd_r, - -err); - else { - printk("not_aio_thread - short read, fd = %d, " - "length = %d\n", aio_req_fd_r, err); - } - continue; - } - err = do_not_aio(&req); - reply = ((struct aio_thread_reply) { .data = req.aio, - .err = err }); - err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply)); - if(err != sizeof(reply)) - 
printk("not_aio_thread - write failed, fd = %d, " - "err = %d\n", aio_req_fd_r, -err); - } + struct aio_thread_req req; + struct aio_thread_reply reply; + int err; + + signal(SIGWINCH, SIG_IGN); + while(1){ + err = os_read_file(aio_req_fd_r, &req, sizeof(req)); + if(err != sizeof(req)){ + if(err < 0) + printk("not_aio_thread - read failed, " + "fd = %d, err = %d\n", aio_req_fd_r, + -err); + else { + printk("not_aio_thread - short read, fd = %d, " + "length = %d\n", aio_req_fd_r, err); + } + continue; + } + err = do_not_aio(&req); + reply = ((struct aio_thread_reply) { .data = req.aio, + .err = err }); + err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("not_aio_thread - write failed, fd = %d, " + "err = %d\n", aio_req_fd_r, -err); + } + + return 0; } static int aio_pid = -1; static int init_aio_24(void) { - unsigned long stack; - int fds[2], err; - - err = os_pipe(fds, 1, 1); - if(err) - goto out; - - aio_req_fd_w = fds[0]; - aio_req_fd_r = fds[1]; - err = run_helper_thread(not_aio_thread, NULL, - CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); - if(err < 0) - goto out_close_pipe; - - aio_pid = err; - goto out; - - out_close_pipe: - os_close_file(fds[0]); - os_close_file(fds[1]); - aio_req_fd_w = -1; - aio_req_fd_r = -1; - out: + unsigned long stack; + int fds[2], err; + + err = os_pipe(fds, 1, 1); + if(err) + goto out; + + aio_req_fd_w = fds[0]; + aio_req_fd_r = fds[1]; + err = run_helper_thread(not_aio_thread, NULL, + CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); + if(err < 0) + goto out_close_pipe; + + aio_pid = err; + goto out; + +out_close_pipe: + os_close_file(fds[0]); + os_close_file(fds[1]); + aio_req_fd_w = -1; + aio_req_fd_r = -1; +out: #ifndef HAVE_AIO_ABI printk("/usr/include/linux/aio_abi.h not present during build\n"); #endif printk("2.6 host AIO support not used - falling back to I/O " "thread\n"); - return 0; + return 0; } #ifdef HAVE_AIO_ABI #define DEFAULT_24_AIO 0 static int init_aio_26(void) { - unsigned long stack; - int err; + unsigned long stack; + int err; - if(io_setup(256, &ctx)){ + if(io_setup(256, &ctx)){ err = -errno; - printk("aio_thread failed to initialize context, err = %d\n", - errno); - return err; - } + printk("aio_thread failed to initialize context, err = %d\n", + errno); + return err; + } - err = run_helper_thread(aio_thread, NULL, - CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); - if(err < 0) - return err; + err = run_helper_thread(aio_thread, NULL, + CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); + if(err < 0) + return err; - aio_pid = err; + aio_pid = err; printk("Using 2.6 host AIO\n"); - return 0; + return 0; } static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, unsigned long long offset, struct aio_context *aio) { - struct aio_thread_reply reply; - int err; - - err = do_aio(ctx, type, io_fd, buf, len, offset, aio); - if(err){ - reply = ((struct aio_thread_reply) { .data = aio, - .err = err }); - err = os_write_file(aio->reply_fd, &reply, sizeof(reply)); - if(err != sizeof(reply)) - printk("submit_aio_26 - write failed, " - "fd = %d, err = %d\n", aio->reply_fd, -err); - else err = 0; - } - - return err; + struct aio_thread_reply reply; + int err; + + err = do_aio(ctx, type, io_fd, buf, len, offset, aio); + if(err){ + reply = ((struct aio_thread_reply) { .data = aio, + .err = err }); + err = os_write_file(aio->reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("submit_aio_26 - write failed, " + "fd = %d, err = %d\n", aio->reply_fd, -err); + else err = 0; + } + 
+ return err; } #else #define DEFAULT_24_AIO 1 static int init_aio_26(void) { - return -ENOSYS; + return -ENOSYS; } static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, unsigned long long offset, struct aio_context *aio) { - return -ENOSYS; + return -ENOSYS; } #endif @@ -310,8 +312,8 @@ static int aio_24 = DEFAULT_24_AIO; static int __init set_aio_24(char *name, int *add) { - aio_24 = 1; - return 0; + aio_24 = 1; + return 0; } __uml_setup("aio=2.4", set_aio_24, @@ -328,28 +330,27 @@ __uml_setup("aio=2.4", set_aio_24, static int init_aio(void) { - int err; - - CHOOSE_MODE(({ - if(!aio_24){ - printk("Disabling 2.6 AIO in tt mode\n"); - aio_24 = 1; - } }), (void) 0); - - if(!aio_24){ - err = init_aio_26(); - if(err && (errno == ENOSYS)){ - printk("2.6 AIO not supported on the host - " - "reverting to 2.4 AIO\n"); - aio_24 = 1; - } - else return err; - } - - if(aio_24) - return init_aio_24(); - - return 0; + int err; + + CHOOSE_MODE(({ if(!aio_24){ + printk("Disabling 2.6 AIO in tt mode\n"); + aio_24 = 1; + } }), (void) 0); + + if(!aio_24){ + err = init_aio_26(); + if(err && (errno == ENOSYS)){ + printk("2.6 AIO not supported on the host - " + "reverting to 2.4 AIO\n"); + aio_24 = 1; + } + else return err; + } + + if(aio_24) + return init_aio_24(); + + return 0; } /* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio @@ -362,8 +363,8 @@ __initcall(init_aio); static void exit_aio(void) { - if(aio_pid != -1) - os_kill_process(aio_pid, 1); + if(aio_pid != -1) + os_kill_process(aio_pid, 1); } __uml_exitcall(exit_aio); @@ -371,30 +372,30 @@ __uml_exitcall(exit_aio); static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, unsigned long long offset, struct aio_context *aio) { - struct aio_thread_req req = { .type = type, - .io_fd = io_fd, - .offset = offset, - .buf = buf, - .len = len, - .aio = aio, - }; - int err; - - err = os_write_file(aio_req_fd_w, &req, sizeof(req)); - if(err == sizeof(req)) - err = 0; - - return err; + struct aio_thread_req req = { .type = type, + .io_fd = io_fd, + .offset = offset, + .buf = buf, + .len = len, + .aio = aio, + }; + int err; + + err = os_write_file(aio_req_fd_w, &req, sizeof(req)); + if(err == sizeof(req)) + err = 0; + + return err; } int submit_aio(enum aio_type type, int io_fd, char *buf, int len, - unsigned long long offset, int reply_fd, - struct aio_context *aio) + unsigned long long offset, int reply_fd, + struct aio_context *aio) { - aio->reply_fd = reply_fd; - if(aio_24) - return submit_aio_24(type, io_fd, buf, len, offset, aio); - else { - return submit_aio_26(type, io_fd, buf, len, offset, aio); - } + aio->reply_fd = reply_fd; + if(aio_24) + return submit_aio_24(type, io_fd, buf, len, offset, aio); + else { + return submit_aio_26(type, io_fd, buf, len, offset, aio); + } } diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c new file mode 100644 index 0000000..ecf107a --- /dev/null +++ b/arch/um/os-Linux/umid.c @@ -0,0 +1,335 @@ +#include <stdio.h> +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <signal.h> +#include <dirent.h> +#include <sys/fcntl.h> +#include <sys/stat.h> +#include <sys/param.h> +#include "init.h" +#include "os.h" +#include "user.h" +#include "mode.h" + +#define UML_DIR "~/.uml/" + +#define UMID_LEN 64 + +/* Changed by set_umid, which is run early in boot */ +char umid[UMID_LEN] = { 0 }; + +/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */ +static char *uml_dir = UML_DIR; + +static int __init 
make_uml_dir(void) +{ + char dir[512] = { '\0' }; + int len, err; + + if(*uml_dir == '~'){ + char *home = getenv("HOME"); + + err = -ENOENT; + if(home == NULL){ + printk("make_uml_dir : no value in environment for " + "$HOME\n"); + goto err; + } + strlcpy(dir, home, sizeof(dir)); + uml_dir++; + } + strlcat(dir, uml_dir, sizeof(dir)); + len = strlen(dir); + if (len > 0 && dir[len - 1] != '/') + strlcat(dir, "/", sizeof(dir)); + + err = -ENOMEM; + uml_dir = malloc(strlen(dir) + 1); + if (uml_dir == NULL) { + printf("make_uml_dir : malloc failed, errno = %d\n", errno); + goto err; + } + strcpy(uml_dir, dir); + + if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){ + printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno)); + err = -errno; + goto err_free; + } + return 0; + +err_free: + free(uml_dir); +err: + uml_dir = NULL; + return err; +} + +static int actually_do_remove(char *dir) +{ + DIR *directory; + struct dirent *ent; + int len; + char file[256]; + + directory = opendir(dir); + if(directory == NULL) + return -errno; + + while((ent = readdir(directory)) != NULL){ + if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) + continue; + len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1; + if(len > sizeof(file)) + return -E2BIG; + + sprintf(file, "%s/%s", dir, ent->d_name); + if(unlink(file) < 0) + return -errno; + } + if(rmdir(dir) < 0) + return -errno; + + return 0; +} + +/* This says that there isn't already a user of the specified directory even if + * there are errors during the checking. This is because if these errors + * happen, the directory is unusable by the pre-existing UML, so we might as + * well take it over. This could happen either by + * the existing UML somehow corrupting its umid directory + * something other than UML sticking stuff in the directory + * this boot racing with a shutdown of the other UML + * In any of these cases, the directory isn't useful for anything else. 
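The check described above boils down to kill(pid, 0): no signal is delivered, but the existence and permission checks still run, so ESRCH reliably means the recorded pid is gone. A minimal userspace sketch of the same test (pid_alive() and pid_file_alive() are illustrative names, not part of this patch):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Stand-alone version of the liveness test not_dead_yet() performs. */
    static int pid_alive(pid_t p)
    {
            if (kill(p, 0) == 0)
                    return 1;       /* process exists, signalling allowed */
            if (errno == EPERM)
                    return 1;       /* exists, but owned by someone else */
            return 0;               /* ESRCH - no such process */
    }

    static int pid_file_alive(const char *path)
    {
            FILE *f = fopen(path, "r");
            long p;

            if (f == NULL)
                    return 0;       /* no pid file - treat as dead */
            if (fscanf(f, "%ld", &p) != 1)
                    p = -1;
            fclose(f);
            return p > 0 && pid_alive((pid_t) p);
    }

Like not_dead_yet() below, this treats EPERM as "alive": a pid we are not allowed to signal still belongs to a running process.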
+ */
+
+static int not_dead_yet(char *dir)
+{
+	char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
+	char pid[sizeof("nnnnn\0")], *end;
+	int dead, fd, p, n, err;
+
+	n = snprintf(file, sizeof(file), "%s/pid", dir);
+	if(n >= sizeof(file)){
+		printk("not_dead_yet - pid filename too long\n");
+		err = -E2BIG;
+		goto out;
+	}
+
+	fd = open(file, O_RDONLY);
+	if(fd < 0){
+		if(errno != ENOENT){
+			printk("not_dead_yet : couldn't open pid file '%s', "
+			       "errno = %d\n", file, errno);
+		}
+		goto out;
+	}
+
+	err = 0;
+	n = read(fd, pid, sizeof(pid) - 1);
+	if(n <= 0){
+		printk("not_dead_yet : couldn't read pid file '%s', "
+		       "err = %d\n", file, -n);
+		goto out_close;
+	}
+	pid[n] = '\0';
+
+	p = strtoul(pid, &end, 0);
+	if(end == pid){
+		printk("not_dead_yet : couldn't parse pid file '%s', "
+		       "errno = %d\n", file, errno);
+		goto out_close;
+	}
+
+	dead = (kill(p, 0) != 0) && (errno == ESRCH);
+	close(fd);
+	if(!dead)
+		return 1;
+
+	err = actually_do_remove(dir);
+	if(err)
+		printk("not_dead_yet - actually_do_remove failed with "
+		       "err = %d\n", err);
+
+	return err;
+
+ out_close:
+	close(fd);
+ out:
+	return 0;
+}
+
+static void __init create_pid_file(void)
+{
+	char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
+	char pid[sizeof("nnnnn\0")];
+	int fd, n;
+
+	if(umid_file_name("pid", file, sizeof(file)))
+		return;
+
+	fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
+	if(fd < 0){
+		printk("Open of machine pid file \"%s\" failed: %s\n",
+		       file, strerror(errno));
+		return;
+	}
+
+	snprintf(pid, sizeof(pid), "%d\n", getpid());
+	n = write(fd, pid, strlen(pid));
+	if(n != strlen(pid))
+		printk("Write of pid file failed - err = %d\n", -n);
+
+	close(fd);
+}
+
+int __init set_umid(char *name)
+{
+	if(strlen(name) > UMID_LEN - 1)
+		return -E2BIG;
+
+	strlcpy(umid, name, sizeof(umid));
+
+	return 0;
+}
+
+static int umid_setup = 0;
+
+int __init make_umid(void)
+{
+	int fd, err;
+	char tmp[256];
+
+	if(umid_setup)
+		return 0;
+
+	make_uml_dir();
+
+	if(*umid == '\0'){
+		strlcpy(tmp, uml_dir, sizeof(tmp));
+		strlcat(tmp, "XXXXXX", sizeof(tmp));
+		fd = mkstemp(tmp);
+		if(fd < 0){
+			printk("make_umid - mkstemp(%s) failed: %s\n",
+			       tmp, strerror(errno));
+			err = -errno;
+			goto err;
+		}
+
+		close(fd);
+
+		set_umid(&tmp[strlen(uml_dir)]);
+
+		/* There's a nice tiny little race between this unlink and
+		 * the mkdir below.  It'd be nice if there were a mkstemp
+		 * for directories.
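glibc does provide such a primitive: mkdtemp(3) generates the name and creates the directory in one atomic step, which would close the unlink/mkdir window described above. A sketch of that alternative, assuming mkdtemp is available on the build host (make_umid_atomically() is an invented name; mkdtemp creates the directory mode 0700, so the 0777 mode used below has to be restored by hand):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>

    static int make_umid_atomically(const char *dir, char *umid, size_t len)
    {
            char tmp[256];

            if (snprintf(tmp, sizeof(tmp), "%sXXXXXX", dir) >= (int) sizeof(tmp))
                    return -1;

            if (mkdtemp(tmp) == NULL)       /* atomic create, mode 0700 */
                    return -1;

            if (chmod(tmp, 0777) < 0)       /* match mkdir(tmp, 0777) below */
                    return -1;

            /* the generated leaf name becomes the umid */
            snprintf(umid, len, "%s", tmp + strlen(dir));
            return 0;
    }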
+ */ + if(unlink(tmp)){ + err = -errno; + goto err; + } + } + + snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid); + err = mkdir(tmp, 0777); + if(err < 0){ + err = -errno; + if(errno != EEXIST) + goto err; + + if(not_dead_yet(tmp) < 0) + goto err; + + err = mkdir(tmp, 0777); + } + if(err < 0){ + printk("Failed to create '%s' - err = %d\n", umid, err); + goto err_rmdir; + } + + umid_setup = 1; + + create_pid_file(); + + return 0; + + err_rmdir: + rmdir(tmp); + err: + return err; +} + +static int __init make_umid_init(void) +{ + make_umid(); + + return 0; +} + +__initcall(make_umid_init); + +int __init umid_file_name(char *name, char *buf, int len) +{ + int n, err; + + err = make_umid(); + if(err) + return err; + + n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name); + if(n >= len){ + printk("umid_file_name : buffer too short\n"); + return -E2BIG; + } + + return 0; +} + +char *get_umid(void) +{ + return umid; +} + +static int __init set_uml_dir(char *name, int *add) +{ + if(*name == '\0'){ + printf("uml_dir can't be an empty string\n"); + return 0; + } + + if(name[strlen(name) - 1] == '/'){ + uml_dir = name; + return 0; + } + + uml_dir = malloc(strlen(name) + 2); + if(uml_dir == NULL){ + printf("Failed to malloc uml_dir - error = %d\n", errno); + + /* Return 0 here because do_initcalls doesn't look at + * the return value. + */ + return 0; + } + sprintf(uml_dir, "%s/", name); + + return 0; +} + +__uml_setup("uml_dir=", set_uml_dir, +"uml_dir=<directory>\n" +" The location to place the pid and umid files.\n\n" +); + +static void remove_umid_dir(void) +{ + char dir[strlen(uml_dir) + UMID_LEN + 1], err; + + sprintf(dir, "%s%s", uml_dir, umid); + err = actually_do_remove(dir); + if(err) + printf("remove_umid_dir - actually_do_remove failed with " + "err = %d\n", err); +} + +__uml_exitcall(remove_umid_dir); diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug index e2c6e64a..fcb06a5 100644 --- a/arch/x86_64/Kconfig.debug +++ b/arch/x86_64/Kconfig.debug @@ -9,6 +9,16 @@ config INIT_DEBUG Fill __init and __initdata at the end of boot. This helps debugging illegal uses of __init and __initdata after initialization. +config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + depends on DEBUG_KERNEL + help + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const data. + This option may have a slight performance impact because a portion + of the kernel code won't be covered by a 2MB TLB anymore. + If in doubt, say "N". + config IOMMU_DEBUG depends on GART_IOMMU && DEBUG_KERNEL bool "Enable IOMMU debugging" diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index e0eb0c7..df0773c 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -341,7 +341,7 @@ ENTRY(ia32_ptregs_common) jmp ia32_sysret /* misbalances the return cache */ CFI_ENDPROC - .data + .section .rodata,"a" .align 8 .globl ia32_sys_call_table ia32_sys_call_table: diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 7519fc5..3060ed9 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); DECLARE_PER_CPU(int, cpu_state); #include <asm/nmi.h> -/* We don't actually take CPU down, just spin without interrupts. 
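The DEBUG_RODATA option above, combined with moving ia32_sys_call_table into .rodata, means a stray write to such a table faults instead of silently patching it. The mechanism is ordinary page protection, applied to the whole section by mark_rodata_ro() below; a userspace analogue with mprotect(2) shows the effect (illustrative program, not kernel code, and mprotect on heap memory is a Linux-specific liberty):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *p;

            if (posix_memalign((void **) &p, page, page))
                    return 1;
            strcpy(p, "read-only from here on");

            /* the moral equivalent of mark_rodata_ro() for one page */
            if (mprotect(p, page, PROT_READ) != 0) {
                    perror("mprotect");
                    return 1;
            }

            printf("reads still work: %s\n", p);
            p[0] = 'X';     /* SIGSEGV here, as a write to .rodata would oops */
            return 0;
    }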
 */
+/* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
 	idle_task_exit();
@@ -166,8 +166,9 @@ static inline void play_dead(void)
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 
+	local_irq_disable();
 	while (1)
-		safe_halt();
+		halt();
 }
 #else
 static inline void play_dead(void)
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685..7c176b3 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 	/* Smells like a compiler bug -- it doesn't work when the & below is removed. */
 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
 #include <asm-x86_64/unistd.h>
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index c016dfe..1faae5f 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -498,6 +498,29 @@ void free_initmem(void)
 	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
 }
 
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+
+void mark_rodata_ro(void)
+{
+	unsigned long addr = (unsigned long)&__start_rodata;
+
+	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+	printk ("Write protecting the kernel read-only data: %luk\n",
+			(&__end_rodata - &__start_rodata) >> 10);
+
+	/*
+	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
+	 * We do this after the printk so that if something went wrong in the
+	 * change, the printk gets out at least to give a better debug hint
+	 * of who is the culprit.
+	 */
+	global_flush_tlb();
+}
+#endif
+
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe..35f1f1a 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	pte_t *kpte;
 	struct page *kpte_page;
 	unsigned kpte_flags;
+	pgprot_t ref_prot2;
 	kpte = lookup_address(address);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		 * split_large_page will take the reference for this change_page_attr
 		 * on the split page.
 		 */
-		struct page *split = split_large_page(address, prot, ref_prot);
+
+		struct page *split;
+		ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+		split = split_large_page(address, prot, ref_prot2);
 		if (!split)
 			return -ENOMEM;
-		set_pte(kpte,mk_pte(split, ref_prot));
+		set_pte(kpte,mk_pte(split, ref_prot2));
 		kpte_page = split;
 	}
 	get_page(kpte_page);
diff --git a/block/Kconfig b/block/Kconfig
index eb48edb..377f6dd 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,7 +5,7 @@
 #for instance.
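The 2TB boundary in the LBD help text below falls straight out of a 32-bit sector_t: 2^32 sectors of 512 bytes each. A throwaway check (standalone program, not part of the tree):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 2^32 sectors x 512 bytes per sector */
            uint64_t max_bytes = (uint64_t) 512 << 32;

            printf("%llu bytes = %llu GiB\n",
                   (unsigned long long) max_bytes,
                   (unsigned long long) (max_bytes >> 30));
            return 0;
    }

prints "2199023255552 bytes = 2048 GiB", exactly 2 TiB. Enabling LBD widens sector_t to 64 bits so larger devices can be addressed.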
config LBD bool "Support for Large Block Devices" - depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML + depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML help Say Y here if you want to attach large (bigger than 2TB) discs to your machine, or if you want to have a raid or loopback device diff --git a/crypto/Kconfig b/crypto/Kconfig index 89299f4..52e1d41 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -40,10 +40,11 @@ config CRYPTO_SHA1 help SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). -config CRYPTO_SHA1_Z990 - tristate "SHA1 digest algorithm for IBM zSeries z990" - depends on CRYPTO && ARCH_S390 +config CRYPTO_SHA1_S390 + tristate "SHA1 digest algorithm (s390)" + depends on CRYPTO && S390 help + This is the s390 hardware accelerated implementation of the SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). config CRYPTO_SHA256 @@ -55,6 +56,16 @@ config CRYPTO_SHA256 This version of SHA implements a 256 bit hash with 128 bits of security against collision attacks. +config CRYPTO_SHA256_S390 + tristate "SHA256 digest algorithm (s390)" + depends on CRYPTO && S390 + help + This is the s390 hardware accelerated implementation of the + SHA256 secure hash standard (DFIPS 180-2). + + This version of SHA implements a 256 bit hash with 128 bits of + security against collision attacks. + config CRYPTO_SHA512 tristate "SHA384 and SHA512 digest algorithms" depends on CRYPTO @@ -98,9 +109,9 @@ config CRYPTO_DES help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). -config CRYPTO_DES_Z990 - tristate "DES and Triple DES cipher algorithms for IBM zSeries z990" - depends on CRYPTO && ARCH_S390 +config CRYPTO_DES_S390 + tristate "DES and Triple DES cipher algorithms (s390)" + depends on CRYPTO && S390 help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). @@ -204,6 +215,26 @@ config CRYPTO_AES_X86_64 See <http://csrc.nist.gov/encryption/aes/> for more information. +config CRYPTO_AES_S390 + tristate "AES cipher algorithms (s390)" + depends on CRYPTO && S390 + help + This is the s390 hardware accelerated implementation of the + AES cipher algorithms (FIPS-197). AES uses the Rijndael + algorithm. + + Rijndael appears to be consistently a very good performer in + both hardware and software across a wide range of computing + environments regardless of its use in feedback or non-feedback + modes. Its key setup time is excellent, and its key agility is + good. Rijndael's very low memory requirements make it very well + suited for restricted-space environments, in which it also + demonstrates excellent performance. Rijndael's operations are + among the easiest to defend against power and timing attacks. + + On s390 the System z9-109 currently only supports the key size + of 128 bit. 
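The CBC test vectors added to tcrypt.h below exercise the same mode the new s390 code accelerates. For orientation, a sketch of driving "aes" in CBC mode through this generation of the kernel crypto API, in the style of tcrypt.c (error handling trimmed, the buffer must not cross a page boundary, and the function name is invented):

    #include <linux/crypto.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <asm/scatterlist.h>

    static int aes_cbc_encrypt_demo(u8 *buf, unsigned int len,
                                    const u8 *key, const u8 *iv)
    {
            struct crypto_tfm *tfm;
            struct scatterlist sg[1];
            int ret;

            tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
            if (tfm == NULL)
                    return -ENOMEM;

            ret = crypto_cipher_setkey(tfm, key, 16);   /* 128-bit key */
            if (ret)
                    goto out;

            crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));

            /* scatterlists are filled by hand in this kernel generation */
            sg[0].page = virt_to_page(buf);
            sg[0].offset = offset_in_page(buf);
            sg[0].length = len;

            ret = crypto_cipher_encrypt(tfm, sg, sg, len);  /* in place */
    out:
            crypto_free_tfm(tfm);
            return ret;
    }

With the s390 modules above loaded, the same call is serviced by the hardware implementation; with a key longer than 128 bits on a z9-109 the hardware path cannot be used, as the help text warns.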
+ config CRYPTO_CAST5 tristate "CAST5 (CAST-128) cipher algorithm" depends on CRYPTO diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 53f4ee8..49e344f 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -805,6 +805,8 @@ static void do_test(void) //AES test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); + test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); + test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); //CAST5 test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); @@ -910,6 +912,8 @@ static void do_test(void) case 10: test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); + test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); + test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); break; case 11: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 522ffd4..733d07e 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -1836,6 +1836,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { */ #define AES_ENC_TEST_VECTORS 3 #define AES_DEC_TEST_VECTORS 3 +#define AES_CBC_ENC_TEST_VECTORS 2 +#define AES_CBC_DEC_TEST_VECTORS 2 static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ @@ -1911,6 +1913,68 @@ static struct cipher_testvec aes_dec_tv_template[] = { }, }; +static struct cipher_testvec aes_cbc_enc_tv_template[] = { + { /* From RFC 3602 */ + .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b, + 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 }, + .klen = 16, + .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30, + 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 }, + .input = { "Single block msg" }, + .ilen = 16, + .result = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8, + 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a }, + .rlen = 16, + }, { + .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0, + 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a }, + .klen = 16, + .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28, + 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 }, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .ilen = 32, + .result = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a, + 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a, + 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9, + 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 }, + .rlen = 32, + }, +}; + +static struct cipher_testvec aes_cbc_dec_tv_template[] = { + { /* From RFC 3602 */ + .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b, + 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 }, + .klen = 16, + .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30, + 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 }, + .input = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8, + 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a }, + .ilen = 16, + .result = { "Single block msg" }, + .rlen = 16, + }, { + .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0, + 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a }, + .klen = 16, + .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28, + 0xdd, 0xb3, 0xba, 0x69, 0x5a, 
0x2e, 0x6f, 0x58 }, + .input = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a, + 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a, + 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9, + 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 }, + .ilen = 32, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .rlen = 32, + }, +}; + /* Cast5 test vectors from RFC 2144 */ #define CAST5_ENC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3 diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 7e1d077..58801d7 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -49,12 +49,12 @@ static struct kset_uevent_ops memory_uevent_ops = { static struct notifier_block *memory_chain; -static int register_memory_notifier(struct notifier_block *nb) +int register_memory_notifier(struct notifier_block *nb) { return notifier_chain_register(&memory_chain, nb); } -static void unregister_memory_notifier(struct notifier_block *nb) +void unregister_memory_notifier(struct notifier_block *nb) { notifier_chain_unregister(&memory_chain, nb); } @@ -62,8 +62,7 @@ static void unregister_memory_notifier(struct notifier_block *nb) /* * register_memory - Setup a sysfs device for a memory block */ -static int -register_memory(struct memory_block *memory, struct mem_section *section, +int register_memory(struct memory_block *memory, struct mem_section *section, struct node *root) { int error; diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index c4b9d2a..139cbba 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -117,7 +117,7 @@ config BLK_DEV_XD config PARIDE tristate "Parallel port IDE device support" - depends on PARPORT + depends on PARPORT_PC ---help--- There are many external CD-ROM and disk devices that connect through your computer's parallel port. 
Most of them are actually IDE devices diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 485345c..33d6f23 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -54,11 +54,15 @@ #include <linux/errno.h> #include <linux/file.h> #include <linux/ioctl.h> +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <net/sock.h> #include <linux/devfs_fs_kernel.h> #include <asm/uaccess.h> +#include <asm/system.h> #include <asm/types.h> #include <linux/nbd.h> @@ -230,14 +234,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) request.len = htonl(size); memcpy(request.handle, &req, sizeof(req)); - down(&lo->tx_lock); - - if (!sock || !lo->sock) { - printk(KERN_ERR "%s: Attempted send on closed socket\n", - lo->disk->disk_name); - goto error_out; - } - dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", lo->disk->disk_name, req, nbdcmd_to_ascii(nbd_cmd(req)), @@ -276,11 +272,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) } } } - up(&lo->tx_lock); return 0; error_out: - up(&lo->tx_lock); return 1; } @@ -289,9 +283,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle) struct request *req; struct list_head *tmp; struct request *xreq; + int err; memcpy(&xreq, handle, sizeof(xreq)); + err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); + if (unlikely(err)) + goto out; + spin_lock(&lo->queue_lock); list_for_each(tmp, &lo->queue_head) { req = list_entry(tmp, struct request, queuelist); @@ -302,7 +301,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle) return req; } spin_unlock(&lo->queue_lock); - return NULL; + + err = -ENOENT; + +out: + return ERR_PTR(err); } static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec) @@ -331,7 +334,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo) goto harderror; } req = nbd_find_request(lo, reply.handle); - if (req == NULL) { + if (unlikely(IS_ERR(req))) { + result = PTR_ERR(req); + if (result != -ENOENT) + goto harderror; + printk(KERN_ERR "%s: Unexpected reply (%p)\n", lo->disk->disk_name, reply.handle); result = -EBADR; @@ -395,19 +402,24 @@ static void nbd_clear_que(struct nbd_device *lo) BUG_ON(lo->magic != LO_MAGIC); - do { - req = NULL; - spin_lock(&lo->queue_lock); - if (!list_empty(&lo->queue_head)) { - req = list_entry(lo->queue_head.next, struct request, queuelist); - list_del_init(&req->queuelist); - } - spin_unlock(&lo->queue_lock); - if (req) { - req->errors++; - nbd_end_request(req); - } - } while (req); + /* + * Because we have set lo->sock to NULL under the tx_lock, all + * modifications to the list must have completed by now. For + * the same reason, the active_req must be NULL. + * + * As a consequence, we don't need to take the spin lock while + * purging the list here. 
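The new active_wq/active_req pair above implements a small publish-and-wait handshake: do_nbd_request() publishes the request it is transmitting, and nbd_find_request() blocks until that request is no longer in flight before touching the queue. Reduced to its bones the pattern looks like this (illustrative names, not the nbd code):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static void *demo_active;          /* item currently being sent */

    /* Reply side: wait until 'item' is not the one being transmitted. */
    static int wait_not_active(void *item)
    {
            /*
             * The condition is re-evaluated on every wakeup; a pending
             * signal aborts the wait with -ERESTARTSYS.
             */
            return wait_event_interruptible(demo_wq, demo_active != item);
    }

    /* Transmit side: publish, send, retire, then wake all waiters. */
    static void send_item(void *item)
    {
            demo_active = item;
            /* ... transmit item ... */
            demo_active = NULL;
            wake_up_all(&demo_wq);     /* each waiter re-checks its item */
    }

In nbd the publish and retire steps happen under tx_lock, which is what lets nbd_clear_que() assert that active_req is NULL once the socket has been torn down.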
+ */ + BUG_ON(lo->sock); + BUG_ON(lo->active_req); + + while (!list_empty(&lo->queue_head)) { + req = list_entry(lo->queue_head.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } /* @@ -435,11 +447,6 @@ static void do_nbd_request(request_queue_t * q) BUG_ON(lo->magic != LO_MAGIC); - if (!lo->file) { - printk(KERN_ERR "%s: Request when not-ready\n", - lo->disk->disk_name); - goto error_out; - } nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { nbd_cmd(req) = NBD_CMD_WRITE; @@ -453,32 +460,34 @@ static void do_nbd_request(request_queue_t * q) req->errors = 0; spin_unlock_irq(q->queue_lock); - spin_lock(&lo->queue_lock); - - if (!lo->file) { - spin_unlock(&lo->queue_lock); - printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n", - lo->disk->disk_name); + down(&lo->tx_lock); + if (unlikely(!lo->sock)) { + up(&lo->tx_lock); + printk(KERN_ERR "%s: Attempted send on closed socket\n", + lo->disk->disk_name); req->errors++; nbd_end_request(req); spin_lock_irq(q->queue_lock); continue; } - list_add(&req->queuelist, &lo->queue_head); - spin_unlock(&lo->queue_lock); + lo->active_req = req; if (nbd_send_req(lo, req) != 0) { printk(KERN_ERR "%s: Request send failed\n", lo->disk->disk_name); - if (nbd_find_request(lo, (char *)&req) != NULL) { - /* we still own req */ - req->errors++; - nbd_end_request(req); - } else /* we're racing with nbd_clear_que */ - printk(KERN_DEBUG "nbd: can't find req\n"); + req->errors++; + nbd_end_request(req); + } else { + spin_lock(&lo->queue_lock); + list_add(&req->queuelist, &lo->queue_head); + spin_unlock(&lo->queue_lock); } + lo->active_req = NULL; + up(&lo->tx_lock); + wake_up_all(&lo->active_wq); + spin_lock_irq(q->queue_lock); continue; @@ -529,17 +538,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file, down(&lo->tx_lock); lo->sock = NULL; up(&lo->tx_lock); - spin_lock(&lo->queue_lock); file = lo->file; lo->file = NULL; - spin_unlock(&lo->queue_lock); nbd_clear_que(lo); - spin_lock(&lo->queue_lock); - if (!list_empty(&lo->queue_head)) { - printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n"); - error = -EBUSY; - } - spin_unlock(&lo->queue_lock); + BUG_ON(!list_empty(&lo->queue_head)); if (file) fput(file); return error; @@ -598,24 +600,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file, lo->sock = NULL; } up(&lo->tx_lock); - spin_lock(&lo->queue_lock); file = lo->file; lo->file = NULL; - spin_unlock(&lo->queue_lock); nbd_clear_que(lo); printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name); if (file) fput(file); return lo->harderror; case NBD_CLEAR_QUE: - down(&lo->tx_lock); - if (lo->sock) { - up(&lo->tx_lock); - return 0; /* probably should be error, but that would - * break "nbd-client -d", so just return 0 */ - } - up(&lo->tx_lock); - nbd_clear_que(lo); + /* + * This is for compatibility only. The queue is always cleared + * by NBD_DO_IT or NBD_CLEAR_SOCK. 
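Seen from userspace, the handlers above line up with the teardown sequence a client performs; a hedged sketch against /dev/nbd0 (the precise order real nbd-client versions use may differ, and NBD_DISCONNECT is normally issued from a second process while the original client is parked inside NBD_DO_IT):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nbd.h>

    /*
     * Tear down an nbd device. After this patch NBD_CLEAR_QUE is a
     * compatibility no-op: the queue is already drained when NBD_DO_IT
     * returns or when NBD_CLEAR_SOCK drops the socket.
     */
    static int nbd_teardown(const char *dev)
    {
            int fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;
            ioctl(fd, NBD_CLEAR_QUE);       /* kept for old clients */
            ioctl(fd, NBD_DISCONNECT);      /* ask the server side to stop */
            ioctl(fd, NBD_CLEAR_SOCK);      /* drop socket, flush the queue */
            close(fd);
            return 0;
    }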
+ */ + BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); return 0; case NBD_PRINT_DEBUG: printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", @@ -688,6 +685,7 @@ static int __init nbd_init(void) spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); init_MUTEX(&nbd_dev[i].tx_lock); + init_waitqueue_head(&nbd_dev[i].active_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ disk->major = NBD_MAJOR; diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig index 17ff405..c0d2854 100644 --- a/drivers/block/paride/Kconfig +++ b/drivers/block/paride/Kconfig @@ -4,11 +4,12 @@ # PARIDE doesn't need PARPORT, but if PARPORT is configured as a module, # PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option # controls the choices given to the user ... +# PARIDE only supports PC style parports. Tough for USB or other parports... config PARIDE_PARPORT tristate depends on PARIDE!=n - default m if PARPORT=m - default y if PARPORT!=m + default m if PARPORT_PC=m + default y if PARPORT_PC!=m comment "Parallel IDE high-level drivers" depends on PARIDE diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 84e68cd..5ebd06b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -985,7 +985,7 @@ config HPET_MMAP config HANGCHECK_TIMER tristate "Hangcheck timer" - depends on X86 || IA64 || PPC64 || ARCH_S390 + depends on X86 || IA64 || PPC64 || S390 help The hangcheck-timer module detects when the system has gone out to lunch past a certain margin. It can reboot the system diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 66e53dd..40a67c8 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -120,7 +120,7 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); #if defined(CONFIG_X86) # define HAVE_MONOTONIC # define TIMER_FREQ 1000000000ULL -#elif defined(CONFIG_ARCH_S390) +#elif defined(CONFIG_S390) /* FA240000 is 1 Second in the IBM time universe (Page 4-38 Principles of Op for zSeries */ # define TIMER_FREQ 0xFA240000ULL #elif defined(CONFIG_IA64) diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c index 6f673d2..49769f5 100644 --- a/drivers/char/hw_random.c +++ b/drivers/char/hw_random.c @@ -1,4 +1,9 @@ /* + Added support for the AMD Geode LX RNG + (c) Copyright 2004-2005 Advanced Micro Devices, Inc. 
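Once a supported RNG is bound, the driver below exposes it through a misc character device, typically /dev/hw_random in this generation (the node name is distribution-dependent). Pulling bytes from it is plain read(2); a short consumer sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[16];
            ssize_t n;
            int i, fd = open("/dev/hw_random", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf));  /* short reads are possible */
            if (n <= 0) {
                    perror("read");
                    return 1;
            }
            for (i = 0; i < (int) n; i++)
                    printf("%02x", buf[i]);
            printf("\n");
            close(fd);
            return 0;
    }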
+ + derived from + Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> @@ -95,6 +100,11 @@ static unsigned int via_data_present (void); static u32 via_data_read (void); #endif +static int __init geode_init(struct pci_dev *dev); +static void geode_cleanup(void); +static unsigned int geode_data_present (void); +static u32 geode_data_read (void); + struct rng_operations { int (*init) (struct pci_dev *dev); void (*cleanup) (void); @@ -122,6 +132,7 @@ enum { rng_hw_intel, rng_hw_amd, rng_hw_via, + rng_hw_geode, }; static struct rng_operations rng_vendor_ops[] = { @@ -139,6 +150,9 @@ static struct rng_operations rng_vendor_ops[] = { /* rng_hw_via */ { via_init, via_cleanup, via_data_present, via_data_read, 1 }, #endif + + /* rng_hw_geode */ + { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 } }; /* @@ -159,6 +173,9 @@ static struct pci_device_id rng_pci_tbl[] = { { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode }, + { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE (pci, rng_pci_tbl); @@ -460,6 +477,57 @@ static void via_cleanup(void) } #endif +/*********************************************************************** + * + * AMD Geode RNG operations + * + */ + +static void __iomem *geode_rng_base = NULL; + +#define GEODE_RNG_DATA_REG 0x50 +#define GEODE_RNG_STATUS_REG 0x54 + +static u32 geode_data_read(void) +{ + u32 val; + + assert(geode_rng_base != NULL); + val = readl(geode_rng_base + GEODE_RNG_DATA_REG); + return val; +} + +static unsigned int geode_data_present(void) +{ + u32 val; + + assert(geode_rng_base != NULL); + val = readl(geode_rng_base + GEODE_RNG_STATUS_REG); + return val; +} + +static void geode_cleanup(void) +{ + iounmap(geode_rng_base); + geode_rng_base = NULL; +} + +static int geode_init(struct pci_dev *dev) +{ + unsigned long rng_base = pci_resource_start(dev, 0); + + if (rng_base == 0) + return 1; + + geode_rng_base = ioremap(rng_base, 0x58); + + if (geode_rng_base == NULL) { + printk(KERN_ERR PFX "Cannot ioremap RNG memory\n"); + return -EBUSY; + } + + return 0; +} /*********************************************************************** * @@ -574,7 +642,7 @@ static int __init rng_init (void) DPRINTK ("ENTER\n"); - /* Probe for Intel, AMD RNGs */ + /* Probe for Intel, AMD, Geode RNGs */ for_each_pci_dev(pdev) { ent = pci_match_id(rng_pci_tbl, pdev); if (ent) { diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 1f56b4c..561430e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -787,7 +787,6 @@ int ipmi_destroy_user(ipmi_user_t user) int i; unsigned long flags; struct cmd_rcvr *rcvr; - struct list_head *entry1, *entry2; struct cmd_rcvr *rcvrs = NULL; user->valid = 1; @@ -812,8 +811,7 @@ int ipmi_destroy_user(ipmi_user_t user) * synchronize_rcu()) then free everything in that list. 
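The idiom the comment describes - unlink under the write-side lock, chain the victims privately, synchronize_rcu(), then free - generalizes beyond ipmi. A reduced sketch (struct and function names invented; the real code uses struct cmd_rcvr and intf->cmd_rcvrs_lock):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <asm/semaphore.h>

    struct rcvr {
            struct list_head link;  /* on the RCU-protected list */
            struct rcvr *next;      /* private chain of victims */
            void *user;
    };

    static void destroy_matching(struct list_head *head, void *user,
                                 struct semaphore *lock)
    {
            struct rcvr *r, *victims = NULL;

            down(lock);
            list_for_each_entry_rcu(r, head, link) {
                    if (r->user == user) {
                            /* readers may still see r until the grace period */
                            list_del_rcu(&r->link);
                            r->next = victims;
                            victims = r;
                    }
            }
            up(lock);

            synchronize_rcu();      /* all pre-existing readers are done */

            while (victims != NULL) {
                    r = victims;
                    victims = r->next;
                    kfree(r);
            }
    }

list_del_rcu() leaves the victim's forward pointer intact, which is why the traversal can continue after deleting the current entry - the same property the rewritten list_for_each_entry_rcu() loop below relies on.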
*/ down(&intf->cmd_rcvrs_lock); - list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) { - rcvr = list_entry(entry1, struct cmd_rcvr, link); + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if (rcvr->user == user) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig index 344001b..a654479 100644 --- a/drivers/char/watchdog/Kconfig +++ b/drivers/char/watchdog/Kconfig @@ -438,7 +438,7 @@ config INDYDOG config ZVM_WATCHDOG tristate "z/VM Watchdog Timer" - depends on WATCHDOG && ARCH_S390 + depends on WATCHDOG && S390 help IBM s/390 and zSeries machines running under z/VM 5.1 or later provide a virtual watchdog timer to their guest that cause a diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index 64fbbb0..25ef5a8 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -1027,10 +1027,10 @@ static int hpsbpkt_thread(void *__hi) daemonize("khpsbpkt"); + current->flags |= PF_NOFREEZE; + while (1) { if (down_interruptible(&khpsbpkt_sig)) { - if (try_to_freeze()) - continue; printk("khpsbpkt: received unexpected signal?!\n" ); break; } diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 9f2352b..a1e660e 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -157,7 +157,7 @@ struct input_event_compat { # define COMPAT_TEST test_thread_flag(TIF_IA32) #elif defined(CONFIG_IA64) # define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) -#elif defined(CONFIG_ARCH_S390) +#elif defined(CONFIG_S390) # define COMPAT_TEST test_thread_flag(TIF_31BIT) #elif defined(CONFIG_MIPS) # define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR) diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index f386966..5e1f5e9 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -52,6 +52,7 @@ static char *sensor_location[3] = {NULL, NULL, NULL}; static int limit_adjust = 0; static int fan_speed = -1; +static int verbose = 0; MODULE_AUTHOR("Colin Leroy <colin@colino.net>"); MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and " @@ -66,6 +67,10 @@ module_param(fan_speed, int, 0644); MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) " "(default 64)"); +module_param(verbose, bool, 0); +MODULE_PARM_DESC(verbose,"Verbose log operations " + "(default 0)"); + struct thermostat { struct i2c_client clt; u8 temps[3]; @@ -149,13 +154,13 @@ detach_thermostat(struct i2c_adapter *adapter) if (thread_therm != NULL) { kthread_stop(thread_therm); } - + printk(KERN_INFO "adt746x: Putting max temperatures back from " "%d, %d, %d to %d, %d, %d\n", th->limits[0], th->limits[1], th->limits[2], th->initial_limits[0], th->initial_limits[1], th->initial_limits[2]); - + for (i = 0; i < 3; i++) write_reg(th, LIMIT_REG[i], th->initial_limits[i]); @@ -212,12 +217,14 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) return; if (th->last_speed[fan] != speed) { - if (speed == -1) - printk(KERN_DEBUG "adt746x: Setting speed to automatic " - "for %s fan.\n", sensor_location[fan+1]); - else - printk(KERN_DEBUG "adt746x: Setting speed to %d " - "for %s fan.\n", speed, sensor_location[fan+1]); + if (verbose) { + if (speed == -1) + printk(KERN_DEBUG "adt746x: Setting speed to automatic " + "for %s fan.\n", sensor_location[fan+1]); + else + printk(KERN_DEBUG "adt746x: Setting speed to %d " + "for %s fan.\n", speed, sensor_location[fan+1]); + } } else return; @@ -298,10 
+305,11 @@ static void update_fans_speed (struct thermostat *th) if (new_speed > 255) new_speed = 255; - printk(KERN_DEBUG "adt746x: setting fans speed to %d " - "(limit exceeded by %d on %s) \n", - new_speed, var, - sensor_location[fan_number+1]); + if (verbose) + printk(KERN_DEBUG "adt746x: Setting fans speed to %d " + "(limit exceeded by %d on %s) \n", + new_speed, var, + sensor_location[fan_number+1]); write_both_fan_speed(th, new_speed); th->last_var[fan_number] = var; } else if (var < -2) { @@ -309,8 +317,9 @@ static void update_fans_speed (struct thermostat *th) * so cold (lastvar >= -1) */ if (i == 2 && lastvar < -1) { if (th->last_speed[fan_number] != 0) - printk(KERN_DEBUG "adt746x: Stopping " - "fans.\n"); + if (verbose) + printk(KERN_DEBUG "adt746x: Stopping " + "fans.\n"); write_both_fan_speed(th, 0); } } @@ -406,7 +415,7 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr, th->initial_limits[i] = read_reg(th, LIMIT_REG[i]); set_limit(th, i); } - + printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d" " to %d, %d, %d\n", th->initial_limits[0], th->initial_limits[1], diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 190878e..435427d 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -1988,18 +1988,13 @@ static void fcu_lookup_fans(struct device_node *fcu_node) static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match) { - int rc; - state = state_detached; /* Lookup the fans in the device tree */ fcu_lookup_fans(dev->node); /* Add the driver */ - rc = i2c_add_driver(&therm_pm72_driver); - if (rc < 0) - return rc; - return 0; + return i2c_add_driver(&therm_pm72_driver); } static int fcu_of_remove(struct of_device* dev) diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index a0a41ad..c62ed68 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -240,12 +240,7 @@ static int wf_lm75_detach(struct i2c_client *client) static int __init wf_lm75_sensor_init(void) { - int rc; - - rc = i2c_add_driver(&wf_lm75_driver); - if (rc < 0) - return rc; - return 0; + return i2c_add_driver(&wf_lm75_driver); } static void __exit wf_lm75_sensor_exit(void) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 252d55d..76a189c 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -315,6 +315,8 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait) if (bitmap->file == NULL) return write_sb_page(bitmap->mddev, bitmap->offset, page, wait); + flush_dcache_page(page); /* make sure visible to anyone reading the file */ + if (wait) lock_page(page); else { @@ -341,7 +343,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait) /* add to list to be waited for by daemon */ struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO); item->page = page; - page_cache_get(page); + get_page(page); spin_lock(&bitmap->write_lock); list_add(&item->list, &bitmap->complete_pages); spin_unlock(&bitmap->write_lock); @@ -357,10 +359,10 @@ static struct page *read_page(struct file *file, unsigned long index, struct inode *inode = file->f_mapping->host; struct page *page = NULL; loff_t isize = i_size_read(inode); - unsigned long end_index = isize >> PAGE_CACHE_SHIFT; + unsigned long end_index = isize >> PAGE_SHIFT; - PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE, - (unsigned long long)index << PAGE_CACHE_SHIFT); + 
PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT); page = read_cache_page(inode->i_mapping, index, (filler_t *)inode->i_mapping->a_ops->readpage, file); @@ -368,7 +370,7 @@ static struct page *read_page(struct file *file, unsigned long index, goto out; wait_on_page_locked(page); if (!PageUptodate(page) || PageError(page)) { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); goto out; } @@ -376,14 +378,14 @@ static struct page *read_page(struct file *file, unsigned long index, if (index > end_index) /* we have read beyond EOF */ *bytes_read = 0; else if (index == end_index) /* possible short read */ - *bytes_read = isize & ~PAGE_CACHE_MASK; + *bytes_read = isize & ~PAGE_MASK; else - *bytes_read = PAGE_CACHE_SIZE; /* got a full page */ + *bytes_read = PAGE_SIZE; /* got a full page */ out: if (IS_ERR(page)) printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", - (int)PAGE_CACHE_SIZE, - (unsigned long long)index << PAGE_CACHE_SHIFT, + (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT, PTR_ERR(page)); return page; } @@ -406,11 +408,11 @@ int bitmap_update_sb(struct bitmap *bitmap) return 0; } spin_unlock_irqrestore(&bitmap->lock, flags); - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); sb->events = cpu_to_le64(bitmap->mddev->events); if (!bitmap->mddev->degraded) sb->events_cleared = cpu_to_le64(bitmap->mddev->events); - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); return write_page(bitmap, bitmap->sb_page, 1); } @@ -421,7 +423,7 @@ void bitmap_print_sb(struct bitmap *bitmap) if (!bitmap || !bitmap->sb_page) return; - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); @@ -440,7 +442,7 @@ void bitmap_print_sb(struct bitmap *bitmap) printk(KERN_DEBUG " sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); } /* read the superblock from the bitmap file and initialize some bitmap fields */ @@ -466,7 +468,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) return err; } - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); if (bytes_read < sizeof(*sb)) { /* short read */ printk(KERN_INFO "%s: bitmap file superblock truncated\n", @@ -485,12 +487,12 @@ static int bitmap_read_sb(struct bitmap *bitmap) else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; - else if (chunksize < 512 || chunksize > (1024 * 1024 * 4)) - reason = "bitmap chunksize out of range (512B - 4MB)"; + else if (chunksize < PAGE_SIZE) + reason = "bitmap chunksize too small"; else if ((1 << ffz(~chunksize)) != chunksize) reason = "bitmap chunksize not a power of 2"; - else if (daemon_sleep < 1 || daemon_sleep > 15) - reason = "daemon sleep period out of range (1-15s)"; + else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) + reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { @@ -535,7 +537,7 @@ success: 
bitmap->events_cleared = bitmap->mddev->events; err = 0; out: - kunmap(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); if (err) bitmap_print_sb(bitmap); return err; @@ -558,9 +560,9 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, spin_unlock_irqrestore(&bitmap->lock, flags); return; } - page_cache_get(bitmap->sb_page); + get_page(bitmap->sb_page); spin_unlock_irqrestore(&bitmap->lock, flags); - sb = (bitmap_super_t *)kmap(bitmap->sb_page); + sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); switch (op) { case MASK_SET: sb->state |= bits; break; @@ -568,8 +570,8 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, break; default: BUG(); } - kunmap(bitmap->sb_page); - page_cache_release(bitmap->sb_page); + kunmap_atomic(sb, KM_USER0); + put_page(bitmap->sb_page); } /* @@ -622,12 +624,11 @@ static void bitmap_file_unmap(struct bitmap *bitmap) while (pages--) if (map[pages]->index != 0) /* 0 is sb_page, release it below */ - page_cache_release(map[pages]); + put_page(map[pages]); kfree(map); kfree(attr); - if (sb_page) - page_cache_release(sb_page); + safe_put_page(sb_page); } static void bitmap_stop_daemon(struct bitmap *bitmap); @@ -654,7 +655,7 @@ static void drain_write_queues(struct bitmap *bitmap) while ((item = dequeue_page(bitmap))) { /* don't bother to wait */ - page_cache_release(item->page); + put_page(item->page); mempool_free(item, bitmap->write_pool); } @@ -763,7 +764,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) /* make sure the page stays cached until it gets written out */ if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY)) - page_cache_get(page); + get_page(page); /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); @@ -854,6 +855,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) unsigned long bytes, offset, dummy; int outofdate; int ret = -ENOSPC; + void *paddr; chunks = bitmap->chunks; file = bitmap->file; @@ -887,12 +889,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) if (!bitmap->filemap) goto out; - bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL); + bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL); if (!bitmap->filemap_attr) goto out; - memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages); - oldindex = ~0L; for (i = 0; i < chunks; i++) { @@ -901,8 +901,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) bit = file_page_offset(i); if (index != oldindex) { /* this is a new page, read it in */ /* unmap the old page, we're done with it */ - if (oldpage != NULL) - kunmap(oldpage); if (index == 0) { /* * if we're here then the superblock page @@ -925,30 +923,32 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) oldindex = index; oldpage = page; - kmap(page); if (outofdate) { /* * if bitmap is out of date, dirty the * whole page and write it out */ - memset(page_address(page) + offset, 0xff, + paddr = kmap_atomic(page, KM_USER0); + memset(paddr + offset, 0xff, PAGE_SIZE - offset); + kunmap_atomic(paddr, KM_USER0); ret = write_page(bitmap, page, 1); if (ret) { - kunmap(page); /* release, page not in filemap yet */ - page_cache_release(page); + put_page(page); goto out; } } bitmap->filemap[bitmap->file_pages++] = page; } + paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) - b = test_bit(bit, page_address(page)); + b = test_bit(bit, paddr); else - b = ext2_test_bit(bit, page_address(page)); + b = 
ext2_test_bit(bit, paddr); + kunmap_atomic(paddr, KM_USER0); if (b) { /* if the disk bit is set, set the memory bit */ bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), @@ -963,9 +963,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) ret = 0; bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); - if (page) /* unmap the last page */ - kunmap(page); - if (bit_cnt) { /* Kick recovery if any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); md_wakeup_thread(bitmap->mddev->thread); @@ -1021,6 +1018,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) int err = 0; int blocks; int attr; + void *paddr; if (bitmap == NULL) return 0; @@ -1043,7 +1041,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) /* skip this page unless it's marked as needing cleaning */ if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) { if (attr & BITMAP_PAGE_NEEDWRITE) { - page_cache_get(page); + get_page(page); clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); } spin_unlock_irqrestore(&bitmap->lock, flags); @@ -1057,13 +1055,13 @@ int bitmap_daemon_work(struct bitmap *bitmap) default: bitmap_file_kick(bitmap); } - page_cache_release(page); + put_page(page); } continue; } /* grab the new page, sync and release the old */ - page_cache_get(page); + get_page(page); if (lastpage != NULL) { if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); @@ -1077,14 +1075,12 @@ int bitmap_daemon_work(struct bitmap *bitmap) set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); spin_unlock_irqrestore(&bitmap->lock, flags); } - kunmap(lastpage); - page_cache_release(lastpage); + put_page(lastpage); if (err) bitmap_file_kick(bitmap); } else spin_unlock_irqrestore(&bitmap->lock, flags); lastpage = page; - kmap(page); /* printk("bitmap clean at page %lu\n", j); */ @@ -1107,10 +1103,12 @@ int bitmap_daemon_work(struct bitmap *bitmap) -1); /* clear the bit */ + paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) - clear_bit(file_page_offset(j), page_address(page)); + clear_bit(file_page_offset(j), paddr); else - ext2_clear_bit(file_page_offset(j), page_address(page)); + ext2_clear_bit(file_page_offset(j), paddr); + kunmap_atomic(paddr, KM_USER0); } } spin_unlock_irqrestore(&bitmap->lock, flags); @@ -1118,7 +1116,6 @@ int bitmap_daemon_work(struct bitmap *bitmap) /* now sync the final page */ if (lastpage != NULL) { - kunmap(lastpage); spin_lock_irqsave(&bitmap->lock, flags); if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) { clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); @@ -1133,7 +1130,7 @@ int bitmap_daemon_work(struct bitmap *bitmap) spin_unlock_irqrestore(&bitmap->lock, flags); } - page_cache_release(lastpage); + put_page(lastpage); } return err; @@ -1184,7 +1181,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev) PRINTK("finished page writeback: %p\n", page); err = PageError(page); - page_cache_release(page); + put_page(page); if (err) { printk(KERN_WARNING "%s: bitmap file writeback " "failed (page %lu): %d\n", @@ -1530,6 +1527,8 @@ void bitmap_destroy(mddev_t *mddev) return; mddev->bitmap = NULL; /* disconnect from the md device */ + if (mddev->thread) + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; bitmap_free(bitmap); } @@ -1555,12 +1554,10 @@ int bitmap_create(mddev_t *mddev) BUG_ON(file && mddev->bitmap_offset); - bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return -ENOMEM; - 
memset(bitmap, 0, sizeof(*bitmap)); - spin_lock_init(&bitmap->lock); bitmap->mddev = mddev; @@ -1601,12 +1598,11 @@ int bitmap_create(mddev_t *mddev) #ifdef INJECT_FATAL_FAULT_1 bitmap->bp = NULL; #else - bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); + bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); #endif err = -ENOMEM; if (!bitmap->bp) goto error; - memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp)); bitmap->flags |= BITMAP_ACTIVE; @@ -1636,6 +1632,8 @@ int bitmap_create(mddev_t *mddev) if (IS_ERR(bitmap->writeback_daemon)) return PTR_ERR(bitmap->writeback_daemon); + mddev->thread->timeout = bitmap->daemon_sleep * HZ; + return bitmap_update_sb(bitmap); error: diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index cf66310..a601a42 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -690,6 +690,8 @@ bad3: bad2: crypto_free_tfm(tfm); bad1: + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); kfree(cc); return -EINVAL; } @@ -706,6 +708,9 @@ static void crypt_dtr(struct dm_target *ti) cc->iv_gen_ops->dtr(cc); crypto_free_tfm(cc->tfm); dm_put_device(ti, cc->dev); + + /* Must zero key material before freeing */ + memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); kfree(cc); } diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h index 1a77f32..f9035bf 100644 --- a/drivers/md/dm-io.h +++ b/drivers/md/dm-io.h @@ -9,9 +9,6 @@ #include "dm.h" -/* FIXME make this configurable */ -#define DM_MAX_IO_REGIONS 8 - struct io_region { struct block_device *bdev; sector_t sector; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 07d44e1..561bda5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -270,6 +270,7 @@ static int dm_hash_rename(const char *old, const char *new) { char *new_name, *old_name; struct hash_cell *hc; + struct dm_table *table; /* * duplicate new. @@ -317,6 +318,15 @@ static int dm_hash_rename(const char *old, const char *new) /* rename the device node in devfs */ register_with_devfs(hc); + /* + * Wake up any dm event waiters. + */ + table = dm_get_table(hc->md); + if (table) { + dm_table_event(table); + dm_table_put(table); + } + up_write(&_hash_lock); kfree(old_name); return 0; @@ -683,14 +693,18 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) static int do_suspend(struct dm_ioctl *param) { int r = 0; + int do_lockfs = 1; struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; + if (param->flags & DM_SKIP_LOCKFS_FLAG) + do_lockfs = 0; + if (!dm_suspended(md)) - r = dm_suspend(md); + r = dm_suspend(md, do_lockfs); if (!r) r = __dev_status(md, param); @@ -702,6 +716,7 @@ static int do_suspend(struct dm_ioctl *param) static int do_resume(struct dm_ioctl *param) { int r = 0; + int do_lockfs = 1; struct hash_cell *hc; struct mapped_device *md; struct dm_table *new_map; @@ -727,8 +742,10 @@ static int do_resume(struct dm_ioctl *param) /* Do we need to load a new map ? 
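DM_SKIP_LOCKFS_FLAG gives userspace the choice: by default a suspend locks and syncs the filesystem sitting on the device, and the flag skips that step. Tools reach it through libdevmapper (later dmsetup versions expose it as "suspend --nolockfs"); at the raw ioctl level the shape is roughly as follows (a sketch against <linux/dm-ioctl.h>, minimally error-checked):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dm-ioctl.h>

    /* Suspend a dm device without freezing the overlying filesystem. */
    static int dm_suspend_nolockfs(int ctl_fd, const char *name)
    {
            struct dm_ioctl io;

            memset(&io, 0, sizeof(io));
            io.version[0] = DM_VERSION_MAJOR;
            io.version[1] = DM_VERSION_MINOR;
            io.version[2] = DM_VERSION_PATCHLEVEL;
            io.data_size = sizeof(io);
            strncpy(io.name, name, sizeof(io.name) - 1);
            io.flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG;

            return ioctl(ctl_fd, DM_DEV_SUSPEND, &io);
    }

with ctl_fd an open descriptor on /dev/mapper/control.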
*/ if (new_map) { /* Suspend if it isn't already suspended */ + if (param->flags & DM_SKIP_LOCKFS_FLAG) + do_lockfs = 0; if (!dm_suspended(md)) - dm_suspend(md); + dm_suspend(md, do_lockfs); r = dm_swap_table(md, new_map); if (r) { diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index a76349c..efe4adf 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -573,7 +573,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region) lc->sync_search); lc->sync_search = *region + 1; - if (*region == lc->region_count) + if (*region >= lc->region_count) return 0; } while (log_test_bit(lc->recovering_bits, *region)); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 6b0fc16..6cfa8d4 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -562,6 +562,8 @@ struct mirror_set { region_t nr_regions; int in_sync; + struct mirror *default_mirror; /* Default mirror */ + unsigned int nr_mirrors; struct mirror mirror[0]; }; @@ -611,7 +613,7 @@ static int recover(struct mirror_set *ms, struct region *reg) unsigned long flags = 0; /* fill in the source */ - m = ms->mirror + DEFAULT_MIRROR; + m = ms->default_mirror; from.bdev = m->dev->bdev; from.sector = m->offset + region_to_sector(reg->rh, reg->key); if (reg->key == (ms->nr_regions - 1)) { @@ -627,7 +629,7 @@ static int recover(struct mirror_set *ms, struct region *reg) /* fill in the destinations */ for (i = 0, dest = to; i < ms->nr_mirrors; i++) { - if (i == DEFAULT_MIRROR) + if (&ms->mirror[i] == ms->default_mirror) continue; m = ms->mirror + i; @@ -682,7 +684,7 @@ static void do_recovery(struct mirror_set *ms) static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) { /* FIXME: add read balancing */ - return ms->mirror + DEFAULT_MIRROR; + return ms->default_mirror; } /* @@ -709,7 +711,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) if (rh_in_sync(&ms->rh, region, 0)) m = choose_mirror(ms, bio->bi_sector); else - m = ms->mirror + DEFAULT_MIRROR; + m = ms->default_mirror; map_bio(ms, m, bio); generic_make_request(bio); @@ -833,7 +835,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) rh_delay(&ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { - map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); + map_bio(ms, ms->default_mirror, bio); generic_make_request(bio); } } @@ -900,6 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; + ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { ti->error = "dm-mirror: Error creating dirty region hash"; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ab54f99..4b9dd8f 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -371,6 +371,20 @@ static inline ulong round_up(ulong n, ulong size) return (n + size) & ~size; } +static void read_snapshot_metadata(struct dm_snapshot *s) +{ + if (s->have_metadata) + return; + + if (s->store.read_metadata(&s->store)) { + down_write(&s->lock); + s->valid = 0; + up_write(&s->lock); + } + + s->have_metadata = 1; +} + /* * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> */ @@ -848,16 +862,7 @@ static void snapshot_resume(struct dm_target *ti) { struct dm_snapshot *s = (struct dm_snapshot *) ti->private; - if (s->have_metadata) - return; - - if (s->store.read_metadata(&s->store)) { - down_write(&s->lock); - s->valid = 0; - up_write(&s->lock); - 
} - - s->have_metadata = 1; + read_snapshot_metadata(s); } static int snapshot_status(struct dm_target *ti, status_type_t type, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 930b9fc..0e48151 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -55,6 +55,7 @@ union map_info *dm_get_mapinfo(struct bio *bio) */ #define DMF_BLOCK_IO 0 #define DMF_SUSPENDED 1 +#define DMF_FROZEN 2 struct mapped_device { struct rw_semaphore io_lock; @@ -97,7 +98,7 @@ struct mapped_device { * freeze/thaw support require holding onto a super block */ struct super_block *frozen_sb; - struct block_device *frozen_bdev; + struct block_device *suspended_bdev; }; #define MIN_IOS 256 @@ -836,9 +837,9 @@ static void __set_size(struct mapped_device *md, sector_t size) { set_capacity(md->disk, size); - down(&md->frozen_bdev->bd_inode->i_sem); - i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); - up(&md->frozen_bdev->bd_inode->i_sem); + down(&md->suspended_bdev->bd_inode->i_sem); + i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); + up(&md->suspended_bdev->bd_inode->i_sem); } static int __bind(struct mapped_device *md, struct dm_table *t) @@ -902,10 +903,9 @@ int dm_create_with_minor(unsigned int minor, struct mapped_device **result) return create_aux(minor, 1, result); } -void *dm_get_mdptr(dev_t dev) +static struct mapped_device *dm_find_md(dev_t dev) { struct mapped_device *md; - void *mdptr = NULL; unsigned minor = MINOR(dev); if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) @@ -914,12 +914,32 @@ void *dm_get_mdptr(dev_t dev) down(&_minor_lock); md = idr_find(&_minor_idr, minor); - - if (md && (dm_disk(md)->first_minor == minor)) - mdptr = md->interface_ptr; + if (!md || (dm_disk(md)->first_minor != minor)) + md = NULL; up(&_minor_lock); + return md; +} + +struct mapped_device *dm_get_md(dev_t dev) +{ + struct mapped_device *md = dm_find_md(dev); + + if (md) + dm_get(md); + + return md; +} + +void *dm_get_mdptr(dev_t dev) +{ + struct mapped_device *md; + void *mdptr = NULL; + + md = dm_find_md(dev); + if (md) + mdptr = md->interface_ptr; return mdptr; } @@ -991,43 +1011,33 @@ out: */ static int lock_fs(struct mapped_device *md) { - int r = -ENOMEM; - - md->frozen_bdev = bdget_disk(md->disk, 0); - if (!md->frozen_bdev) { - DMWARN("bdget failed in lock_fs"); - goto out; - } + int r; WARN_ON(md->frozen_sb); - md->frozen_sb = freeze_bdev(md->frozen_bdev); + md->frozen_sb = freeze_bdev(md->suspended_bdev); if (IS_ERR(md->frozen_sb)) { r = PTR_ERR(md->frozen_sb); - goto out_bdput; + md->frozen_sb = NULL; + return r; } + set_bit(DMF_FROZEN, &md->flags); + /* don't bdput right now, we don't want the bdev - * to go away while it is locked. We'll bdput - * in unlock_fs + * to go away while it is locked. */ return 0; - -out_bdput: - bdput(md->frozen_bdev); - md->frozen_sb = NULL; - md->frozen_bdev = NULL; -out: - return r; } static void unlock_fs(struct mapped_device *md) { - thaw_bdev(md->frozen_bdev, md->frozen_sb); - bdput(md->frozen_bdev); + if (!test_bit(DMF_FROZEN, &md->flags)) + return; + thaw_bdev(md->suspended_bdev, md->frozen_sb); md->frozen_sb = NULL; - md->frozen_bdev = NULL; + clear_bit(DMF_FROZEN, &md->flags); } /* @@ -1037,7 +1047,7 @@ static void unlock_fs(struct mapped_device *md) * dm_bind_table, dm_suspend must be called to flush any in * flight bios and ensure that any further io gets deferred. 
*/ -int dm_suspend(struct mapped_device *md) +int dm_suspend(struct mapped_device *md, int do_lockfs) { struct dm_table *map = NULL; DECLARE_WAITQUEUE(wait, current); @@ -1053,10 +1063,19 @@ int dm_suspend(struct mapped_device *md) /* This does not get reverted if there's an error later. */ dm_table_presuspend_targets(map); - /* Flush I/O to the device. */ - r = lock_fs(md); - if (r) + md->suspended_bdev = bdget_disk(md->disk, 0); + if (!md->suspended_bdev) { + DMWARN("bdget failed in dm_suspend"); + r = -ENOMEM; goto out; + } + + /* Flush I/O to the device. */ + if (do_lockfs) { + r = lock_fs(md); + if (r) + goto out; + } /* * First we set the BLOCK_IO flag so no more ios will be mapped. @@ -1105,6 +1124,11 @@ int dm_suspend(struct mapped_device *md) r = 0; out: + if (r && md->suspended_bdev) { + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + } + dm_table_put(map); up(&md->suspend_lock); return r; @@ -1135,6 +1159,9 @@ int dm_resume(struct mapped_device *md) unlock_fs(md); + bdput(md->suspended_bdev); + md->suspended_bdev = NULL; + clear_bit(DMF_SUSPENDED, &md->flags); dm_table_unplug_all(map); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index e38c3fc..4eaf075 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -28,7 +28,7 @@ * in types.h. */ #ifdef CONFIG_LBD -#define SECTOR_FORMAT "%Lu" +#define SECTOR_FORMAT "%llu" #else #define SECTOR_FORMAT "%lu" #endif @@ -58,6 +58,7 @@ int dm_create(struct mapped_device **md); int dm_create_with_minor(unsigned int minor, struct mapped_device **md); void dm_set_mdptr(struct mapped_device *md, void *ptr); void *dm_get_mdptr(dev_t dev); +struct mapped_device *dm_get_md(dev_t dev); /* * Reference counting for md. @@ -68,7 +69,7 @@ void dm_put(struct mapped_device *md); /* * A device can still be used while suspended, but I/O is deferred. */ -int dm_suspend(struct mapped_device *md); +int dm_suspend(struct mapped_device *md, int with_lockfs); int dm_resume(struct mapped_device *md); /* diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 0248f8e..a7a5ab5 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -316,9 +316,10 @@ static int stop(mddev_t *mddev) return 0; } -static mdk_personality_t faulty_personality = +static struct mdk_personality faulty_personality = { .name = "faulty", + .level = LEVEL_FAULTY, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -329,15 +330,17 @@ static mdk_personality_t faulty_personality = static int __init raid_init(void) { - return register_md_personality(FAULTY, &faulty_personality); + return register_md_personality(&faulty_personality); } static void raid_exit(void) { - unregister_md_personality(FAULTY); + unregister_md_personality(&faulty_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-10"); /* faulty */ +MODULE_ALIAS("md-faulty"); +MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index eb70364..ca99979 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c @@ -561,11 +561,13 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, * Cancels a kcopyd job, eg. someone might be deactivating a * mirror. 
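The dm_suspend() rework above separates two things that used to be tied together: holding a reference on the block device (md->suspended_bdev, taken with bdget_disk) and actually freezing the filesystem (tracked by the new DMF_FROZEN flag), so unlock_fs() is now safe to call whether or not lockfs was used. A minimal standalone model of that control flow; the struct and booleans are invented stand-ins, not kernel API:

#include <stdbool.h>

struct dm_sketch {
	bool have_bdev;   /* stands in for md->suspended_bdev */
	bool frozen;      /* stands in for DMF_FROZEN */
	bool suspended;   /* stands in for DMF_SUSPENDED */
};

int lock_fs(struct dm_sketch *md)
{
	md->frozen = true;          /* freeze_bdev(); may fail in reality */
	return 0;
}

void unlock_fs(struct dm_sketch *md)
{
	if (!md->frozen)
		return;             /* harmless no-op when lockfs was skipped */
	md->frozen = false;         /* thaw_bdev() */
}

int dm_suspend_sketch(struct dm_sketch *md, int do_lockfs)
{
	md->have_bdev = true;       /* bdget_disk() */
	if (do_lockfs && lock_fs(md) != 0)
		goto err;
	md->suspended = true;       /* block and flush in-flight I/O */
	return 0;
err:
	md->have_bdev = false;      /* bdput() on the error path */
	return -1;
}

void dm_resume_sketch(struct dm_sketch *md)
{
	unlock_fs(md);              /* no-op unless the frozen flag is set */
	md->have_bdev = false;      /* bdput() now happens at resume time */
	md->suspended = false;
}

The point of the flag shows in dm_resume_sketch(): resume no longer needs to know whether the suspend ioctl asked for DM_SKIP_LOCKFS_FLAG.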
*/ +#if 0 int kcopyd_cancel(struct kcopyd_job *job, int block) { /* FIXME: finish */ return -1; } +#endif /* 0 */ /*----------------------------------------------------------------- * Unit setup @@ -684,4 +686,3 @@ void kcopyd_client_destroy(struct kcopyd_client *kc) EXPORT_SYMBOL(kcopyd_client_create); EXPORT_SYMBOL(kcopyd_client_destroy); EXPORT_SYMBOL(kcopyd_copy); -EXPORT_SYMBOL(kcopyd_cancel); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 946efef..7775854 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -121,11 +121,10 @@ static int linear_run (mddev_t *mddev) sector_t curr_offset; struct list_head *tmp; - conf = kmalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), + conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), GFP_KERNEL); if (!conf) goto out; - memset(conf, 0, sizeof(*conf) + mddev->raid_disks*sizeof(dev_info_t)); mddev->private = conf; cnt = 0; @@ -352,9 +351,10 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev) } -static mdk_personality_t linear_personality= +static struct mdk_personality linear_personality = { .name = "linear", + .level = LEVEL_LINEAR, .owner = THIS_MODULE, .make_request = linear_make_request, .run = linear_run, @@ -364,16 +364,18 @@ static mdk_personality_t linear_personality= static int __init linear_init (void) { - return register_md_personality (LINEAR, &linear_personality); + return register_md_personality (&linear_personality); } static void linear_exit (void) { - unregister_md_personality (LINEAR); + unregister_md_personality (&linear_personality); } module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); -MODULE_ALIAS("md-personality-1"); /* LINEAR */ +MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ +MODULE_ALIAS("md-linear"); +MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8175a2a..1b76fb2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -42,6 +42,7 @@ #include <linux/devfs_fs_kernel.h> #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/suspend.h> +#include <linux/poll.h> #include <linux/init.h> @@ -67,7 +68,7 @@ static void autostart_arrays (int part); #endif -static mdk_personality_t *pers[MAX_PERSONALITY]; +static LIST_HEAD(pers_list); static DEFINE_SPINLOCK(pers_lock); /* @@ -80,10 +81,22 @@ static DEFINE_SPINLOCK(pers_lock); * idle IO detection. * * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. + * or /sys/block/mdX/md/sync_speed_{min,max} */ static int sysctl_speed_limit_min = 1000; static int sysctl_speed_limit_max = 200000; +static inline int speed_min(mddev_t *mddev) +{ + return mddev->sync_speed_min ? + mddev->sync_speed_min : sysctl_speed_limit_min; +} + +static inline int speed_max(mddev_t *mddev) +{ + return mddev->sync_speed_max ? + mddev->sync_speed_max : sysctl_speed_limit_max; +} static struct ctl_table_header *raid_table_header; @@ -134,6 +147,24 @@ static struct block_device_operations md_fops; static int start_readonly; /* + * We have a system wide 'event count' that is incremented + * on any 'interesting' event, and readers of /proc/mdstat + * can use 'poll' or 'select' to find out when the event + * count increases. 
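md_new_event() gives userspace something concrete: instead of re-reading /proc/mdstat on a timer, a monitor can sleep in poll() and be woken when the event count moves. A sketch of such a monitor loop, written against the mdstat_poll() semantics added later in this patch (it reports POLLERR|POLLPRI once md_event_count changes); error handling is trimmed:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/proc/mdstat", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		/* consume the current contents, then sleep until an event */
		lseek(fd, 0, SEEK_SET);
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);
		if (poll(&pfd, 1, -1) < 0)
			break;
	}
	close(fd);
	return 0;
}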
+ * + * Events are: + * start array, stop array, error, add device, remove device, + * start build, activate spare + */ +static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +static atomic_t md_event_count; +static void md_new_event(mddev_t *mddev) +{ + atomic_inc(&md_event_count); + wake_up(&md_event_waiters); +} + +/* * Enables to iterate over all existing md arrays * all_mddevs_lock protects this list. */ @@ -209,12 +240,10 @@ static mddev_t * mddev_find(dev_t unit) } spin_unlock(&all_mddevs_lock); - new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL); + new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; - memset(new, 0, sizeof(*new)); - new->unit = unit; if (MAJOR(unit) == MD_MAJOR) new->md_minor = MINOR(unit); @@ -262,7 +291,7 @@ static inline void mddev_unlock(mddev_t * mddev) md_wakeup_thread(mddev->thread); } -mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) +static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) { mdk_rdev_t * rdev; struct list_head *tmp; @@ -286,6 +315,18 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) return NULL; } +static struct mdk_personality *find_pers(int level, char *clevel) +{ + struct mdk_personality *pers; + list_for_each_entry(pers, &pers_list, list) { + if (level != LEVEL_NONE && pers->level == level) + return pers; + if (strcmp(pers->name, clevel)==0) + return pers; + } + return NULL; +} + static inline sector_t calc_dev_sboffset(struct block_device *bdev) { sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; @@ -320,7 +361,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev) static void free_disk_sb(mdk_rdev_t * rdev) { if (rdev->sb_page) { - page_cache_release(rdev->sb_page); + put_page(rdev->sb_page); rdev->sb_loaded = 0; rdev->sb_page = NULL; rdev->sb_offset = 0; @@ -461,6 +502,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size, bio_put(bio); return ret; } +EXPORT_SYMBOL_GPL(sync_page_io); static int read_disk_sb(mdk_rdev_t * rdev, int size) { @@ -665,6 +707,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version } rdev->size = calc_dev_size(rdev, sb->chunk_size); + if (rdev->size < sb->size && sb->level > 1) + /* "this cannot possibly happen" ... 
*/ + ret = -EINVAL; + abort: return ret; } @@ -688,6 +734,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->ctime = sb->ctime; mddev->utime = sb->utime; mddev->level = sb->level; + mddev->clevel[0] = 0; mddev->layout = sb->layout; mddev->raid_disks = sb->raid_disks; mddev->size = sb->size; @@ -714,9 +761,10 @@ if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && mddev->bitmap_file == NULL) { - if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) { + if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 + && mddev->level != 10) { /* FIXME use a better test */ - printk(KERN_WARNING "md: bitmaps only support for raid1\n"); + printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); return -EINVAL; } mddev->bitmap_offset = mddev->default_bitmap_offset; @@ -968,6 +1016,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) } rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); + atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; @@ -1006,6 +1055,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) rdev->size = le64_to_cpu(sb->data_size)/2; if (le32_to_cpu(sb->chunksize)) rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); + + if (le32_to_cpu(sb->size) > rdev->size*2) + return -EINVAL; return 0; } @@ -1023,6 +1075,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); mddev->level = le32_to_cpu(sb->level); + mddev->clevel[0] = 0; mddev->layout = le32_to_cpu(sb->layout); mddev->raid_disks = le32_to_cpu(sb->raid_disks); mddev->size = le64_to_cpu(sb->size)/2; @@ -1037,8 +1090,9 @@ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && mddev->bitmap_file == NULL ) { - if (mddev->level != 1) { - printk(KERN_WARNING "md: bitmaps only supported for raid1\n"); + if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 + && mddev->level != 10) { + printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); return -EINVAL; } mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); @@ -1105,6 +1159,8 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) else sb->resync_offset = cpu_to_le64(0); + sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); + if (mddev->bitmap && mddev->bitmap_file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); @@ -1187,6 +1243,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) MD_BUG(); return -EINVAL; } + /* make sure rdev->size is at least mddev->size */ + if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { + if (mddev->pers) + /* Cannot change size, so fail */ + return -ENOSPC; + else + mddev->size = rdev->size; + } same_pdev = match_dev_unit(mddev, rdev); if (same_pdev) printk(KERN_WARNING @@ -1496,6 +1560,26 @@ repeat: } +/* words written to sysfs files may, or may not, be \n terminated. + * We want to accept either case. For this we use cmd_match. + */ +static int cmd_match(const char *cmd, const char *str) +{ + /* See if cmd, written into a sysfs file, matches + * str. 
They must either be the same, or cmd can + * have a trailing newline + */ + while (*cmd && *str && *cmd == *str) { + cmd++; + str++; + } + if (*cmd == '\n') + cmd++; + if (*str || *cmd) + return 0; + return 1; +} + struct rdev_sysfs_entry { struct attribute attr; ssize_t (*show)(mdk_rdev_t *, char *); @@ -1538,9 +1622,113 @@ super_show(mdk_rdev_t *rdev, char *page) } static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); +static ssize_t +errors_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); +} + +static ssize_t +errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { + atomic_set(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +} +static struct rdev_sysfs_entry rdev_errors = +__ATTR(errors, 0644, errors_show, errors_store); + +static ssize_t +slot_show(mdk_rdev_t *rdev, char *page) +{ + if (rdev->raid_disk < 0) + return sprintf(page, "none\n"); + else + return sprintf(page, "%d\n", rdev->raid_disk); +} + +static ssize_t +slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + int slot = simple_strtoul(buf, &e, 10); + if (strncmp(buf, "none", 4)==0) + slot = -1; + else if (e==buf || (*e && *e!= '\n')) + return -EINVAL; + if (rdev->mddev->pers) + /* Cannot set slot in active array (yet) */ + return -EBUSY; + if (slot >= rdev->mddev->raid_disks) + return -ENOSPC; + rdev->raid_disk = slot; + /* assume it is working */ + rdev->flags = 0; + set_bit(In_sync, &rdev->flags); + return len; +} + + +static struct rdev_sysfs_entry rdev_slot = +__ATTR(slot, 0644, slot_show, slot_store); + +static ssize_t +offset_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); +} + +static ssize_t +offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long long offset = simple_strtoull(buf, &e, 10); + if (e==buf || (*e && *e != '\n')) + return -EINVAL; + if (rdev->mddev->pers) + return -EBUSY; + rdev->data_offset = offset; + return len; +} + +static struct rdev_sysfs_entry rdev_offset = +__ATTR(offset, 0644, offset_show, offset_store); + +static ssize_t +rdev_size_show(mdk_rdev_t *rdev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)rdev->size); +} + +static ssize_t +rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + char *e; + unsigned long long size = simple_strtoull(buf, &e, 10); + if (e==buf || (*e && *e != '\n')) + return -EINVAL; + if (rdev->mddev->pers) + return -EBUSY; + rdev->size = size; + if (size < rdev->mddev->size || rdev->mddev->size == 0) + rdev->mddev->size = size; + return len; +} + +static struct rdev_sysfs_entry rdev_size = +__ATTR(size, 0644, rdev_size_show, rdev_size_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, + &rdev_errors.attr, + &rdev_slot.attr, + &rdev_offset.attr, + &rdev_size.attr, NULL, }; static ssize_t @@ -1598,12 +1786,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi mdk_rdev_t *rdev; sector_t size; - rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { printk(KERN_ERR "md: could not alloc mem for new device!\n"); return ERR_PTR(-ENOMEM); } - memset(rdev, 0, sizeof(*rdev)); if ((err = alloc_disk_sb(rdev))) goto abort_free; @@ -1621,6 +1808,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int 
super_format, int super_mi rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); + atomic_set(&rdev->corrected_errors, 0); size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; if (!size) { @@ -1725,16 +1913,37 @@ static void analyze_sbs(mddev_t * mddev) static ssize_t level_show(mddev_t *mddev, char *page) { - mdk_personality_t *p = mddev->pers; - if (p == NULL && mddev->raid_disks == 0) - return 0; - if (mddev->level >= 0) - return sprintf(page, "raid%d\n", mddev->level); - else + struct mdk_personality *p = mddev->pers; + if (p) return sprintf(page, "%s\n", p->name); + else if (mddev->clevel[0]) + return sprintf(page, "%s\n", mddev->clevel); + else if (mddev->level != LEVEL_NONE) + return sprintf(page, "%d\n", mddev->level); + else + return 0; +} + +static ssize_t +level_store(mddev_t *mddev, const char *buf, size_t len) +{ + int rv = len; + if (mddev->pers) + return -EBUSY; + if (len == 0) + return 0; + if (len >= sizeof(mddev->clevel)) + return -ENOSPC; + strncpy(mddev->clevel, buf, len); + if (mddev->clevel[len-1] == '\n') + len--; + mddev->clevel[len] = 0; + mddev->level = LEVEL_NONE; + return rv; } -static struct md_sysfs_entry md_level = __ATTR_RO(level); +static struct md_sysfs_entry md_level = +__ATTR(level, 0644, level_show, level_store); static ssize_t raid_disks_show(mddev_t *mddev, char *page) @@ -1744,7 +1953,197 @@ raid_disks_show(mddev_t *mddev, char *page) return sprintf(page, "%d\n", mddev->raid_disks); } -static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); +static int update_raid_disks(mddev_t *mddev, int raid_disks); + +static ssize_t +raid_disks_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* can only set raid_disks if array is not yet active */ + char *e; + int rv = 0; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) + rv = update_raid_disks(mddev, n); + else + mddev->raid_disks = n; + return rv ? rv : len; +} +static struct md_sysfs_entry md_raid_disks = +__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); + +static ssize_t +chunk_size_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->chunk_size); +} + +static ssize_t +chunk_size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* can only set chunk_size if array is not yet active */ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (mddev->pers) + return -EBUSY; + if (!*buf || (*e && *e != '\n')) + return -EINVAL; + + mddev->chunk_size = n; + return len; +} +static struct md_sysfs_entry md_chunk_size = +__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); + +static ssize_t +null_show(mddev_t *mddev, char *page) +{ + return -EINVAL; +} + +static ssize_t +new_dev_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* buf must be %d:%d\n? giving major and minor numbers */ + /* The new device is added to the array. + * If the array has a persistent superblock, we read the + * superblock to initialise info and check validity. + * Otherwise, only checking done is that in bind_rdev_to_array, + * which mainly checks size. 
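new_dev_store() above accepts text like "8:0", parses the two numbers itself, and uses the MAJOR/MINOR round trip through MKDEV to reject values that do not fit. The same parse re-rendered as a hypothetical userspace helper, with glibc's makedev()/major()/minor() standing in for the kernel macros:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>

int parse_dev(const char *buf, dev_t *out)
{
	char *e;
	unsigned long maj = strtoul(buf, &e, 10);
	unsigned long min;
	dev_t dev;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	min = strtoul(e + 1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = makedev(maj, min);
	if (major(dev) != maj || minor(dev) != min)
		return -EOVERFLOW;   /* value did not survive the round trip */
	*out = dev;
	return 0;
}

int main(void)
{
	dev_t dev;

	if (parse_dev("8:0\n", &dev) == 0)
		printf("major=%u minor=%u\n", major(dev), minor(dev));
	return 0;
}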
+ */ + char *e; + int major = simple_strtoul(buf, &e, 10); + int minor; + dev_t dev; + mdk_rdev_t *rdev; + int err; + + if (!*buf || *e != ':' || !e[1] || e[1] == '\n') + return -EINVAL; + minor = simple_strtoul(e+1, &e, 10); + if (*e && *e != '\n') + return -EINVAL; + dev = MKDEV(major, minor); + if (major != MAJOR(dev) || + minor != MINOR(dev)) + return -EOVERFLOW; + + + if (mddev->persistent) { + rdev = md_import_device(dev, mddev->major_version, + mddev->minor_version); + if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { + mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, + mdk_rdev_t, same_set); + err = super_types[mddev->major_version] + .load_super(rdev, rdev0, mddev->minor_version); + if (err < 0) + goto out; + } + } else + rdev = md_import_device(dev, -1, -1); + + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + err = bind_rdev_to_array(rdev, mddev); + out: + if (err) + export_rdev(rdev); + return err ? err : len; +} + +static struct md_sysfs_entry md_new_device = +__ATTR(new_dev, 0200, null_show, new_dev_store); + +static ssize_t +size_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->size); +} + +static int update_size(mddev_t *mddev, unsigned long size); + +static ssize_t +size_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* If array is inactive, we can reduce the component size, but + * not increase it (except from 0). + * If array is active, we can try an on-line resize + */ + char *e; + int err = 0; + unsigned long long size = simple_strtoull(buf, &e, 10); + if (!*buf || *buf == '\n' || + (*e && *e != '\n')) + return -EINVAL; + + if (mddev->pers) { + err = update_size(mddev, size); + md_update_sb(mddev); + } else { + if (mddev->size == 0 || + mddev->size > size) + mddev->size = size; + else + err = -ENOSPC; + } + return err ? err : len; +} + +static struct md_sysfs_entry md_size = +__ATTR(component_size, 0644, size_show, size_store); + + +/* Metadata version. 
+ * This is either 'none' for arrays with externally managed metadata, + * or N.M for internally known formats + */ +static ssize_t +metadata_show(mddev_t *mddev, char *page) +{ + if (mddev->persistent) + return sprintf(page, "%d.%d\n", + mddev->major_version, mddev->minor_version); + else + return sprintf(page, "none\n"); +} + +static ssize_t +metadata_store(mddev_t *mddev, const char *buf, size_t len) +{ + int major, minor; + char *e; + if (!list_empty(&mddev->disks)) + return -EBUSY; + + if (cmd_match(buf, "none")) { + mddev->persistent = 0; + mddev->major_version = 0; + mddev->minor_version = 90; + return len; + } + major = simple_strtoul(buf, &e, 10); + if (e==buf || *e != '.') + return -EINVAL; + buf = e+1; + minor = simple_strtoul(buf, &e, 10); + if (e==buf || *e != '\n') + return -EINVAL; + if (major >= sizeof(super_types)/sizeof(super_types[0]) || + super_types[major].name == NULL) + return -ENOENT; + mddev->major_version = major; + mddev->minor_version = minor; + mddev->persistent = 1; + return len; +} + +static struct md_sysfs_entry md_metadata = +__ATTR(metadata_version, 0644, metadata_show, metadata_store); static ssize_t action_show(mddev_t *mddev, char *page) @@ -1771,31 +2170,27 @@ action_store(mddev_t *mddev, const char *page, size_t len) if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) { + if (cmd_match(page, "idle")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_unregister_thread(mddev->sync_thread); mddev->sync_thread = NULL; mddev->recovery = 0; } - return len; - } - - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; - if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 || - strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0) + else if (cmd_match(page, "resync") || cmd_match(page, "recover")) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); else { - if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) + if (cmd_match(page, "check")) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) + else if (cmd_match(page, "repair")) return -EINVAL; set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); return len; } @@ -1814,15 +2209,107 @@ md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); +static ssize_t +sync_min_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_min(mddev), + mddev->sync_speed_min ? "local": "system"); +} + +static ssize_t +sync_min_store(mddev_t *mddev, const char *buf, size_t len) +{ + int min; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_min = 0; + return len; + } + min = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || min <= 0) + return -EINVAL; + mddev->sync_speed_min = min; + return len; +} + +static struct md_sysfs_entry md_sync_min = +__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); + +static ssize_t +sync_max_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", speed_max(mddev), + mddev->sync_speed_max ? 
"local": "system"); +} + +static ssize_t +sync_max_store(mddev_t *mddev, const char *buf, size_t len) +{ + int max; + char *e; + if (strncmp(buf, "system", 6)==0) { + mddev->sync_speed_max = 0; + return len; + } + max = simple_strtoul(buf, &e, 10); + if (buf == e || (*e && *e != '\n') || max <= 0) + return -EINVAL; + mddev->sync_speed_max = max; + return len; +} + +static struct md_sysfs_entry md_sync_max = +__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); + + +static ssize_t +sync_speed_show(mddev_t *mddev, char *page) +{ + unsigned long resync, dt, db; + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); + dt = ((jiffies - mddev->resync_mark) / HZ); + if (!dt) dt++; + db = resync - (mddev->resync_mark_cnt); + return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ +} + +static struct md_sysfs_entry +md_sync_speed = __ATTR_RO(sync_speed); + +static ssize_t +sync_completed_show(mddev_t *mddev, char *page) +{ + unsigned long max_blocks, resync; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + max_blocks = mddev->resync_max_sectors; + else + max_blocks = mddev->size << 1; + + resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); + return sprintf(page, "%lu / %lu\n", resync, max_blocks); +} + +static struct md_sysfs_entry +md_sync_completed = __ATTR_RO(sync_completed); + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, + &md_chunk_size.attr, + &md_size.attr, + &md_metadata.attr, + &md_new_device.attr, NULL, }; static struct attribute *md_redundancy_attrs[] = { &md_scan_mode.attr, &md_mismatches.attr, + &md_sync_min.attr, + &md_sync_max.attr, + &md_sync_speed.attr, + &md_sync_completed.attr, NULL, }; static struct attribute_group md_redundancy_group = { @@ -1937,14 +2424,16 @@ static void md_safemode_timeout(unsigned long data) md_wakeup_thread(mddev->thread); } +static int start_dirty_degraded; static int do_md_run(mddev_t * mddev) { - int pnum, err; + int err; int chunk_size; struct list_head *tmp; mdk_rdev_t *rdev; struct gendisk *disk; + struct mdk_personality *pers; char b[BDEVNAME_SIZE]; if (list_empty(&mddev->disks)) @@ -1961,20 +2450,8 @@ static int do_md_run(mddev_t * mddev) analyze_sbs(mddev); chunk_size = mddev->chunk_size; - pnum = level_to_pers(mddev->level); - if ((pnum != MULTIPATH) && (pnum != RAID1)) { - if (!chunk_size) { - /* - * 'default chunksize' in the old md code used to - * be PAGE_SIZE, baaad. - * we abort here to be on the safe side. We don't - * want to continue the bad practice. 
- */ - printk(KERN_ERR - "no chunksize specified, see 'man raidtab'\n"); - return -EINVAL; - } + if (chunk_size) { if (chunk_size > MAX_CHUNK_SIZE) { printk(KERN_ERR "too big chunk_size: %d > %d\n", chunk_size, MAX_CHUNK_SIZE); @@ -2010,10 +2487,10 @@ static int do_md_run(mddev_t * mddev) } #ifdef CONFIG_KMOD - if (!pers[pnum]) - { - request_module("md-personality-%d", pnum); - } + if (mddev->level != LEVEL_NONE) + request_module("md-level-%d", mddev->level); + else if (mddev->clevel[0]) + request_module("md-%s", mddev->clevel); #endif /* @@ -2035,30 +2512,39 @@ static int do_md_run(mddev_t * mddev) return -ENOMEM; spin_lock(&pers_lock); - if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) { + pers = find_pers(mddev->level, mddev->clevel); + if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); - printk(KERN_WARNING "md: personality %d is not loaded!\n", - pnum); + if (mddev->level != LEVEL_NONE) + printk(KERN_WARNING "md: personality for level %d is not loaded!\n", + mddev->level); + else + printk(KERN_WARNING "md: personality for level %s is not loaded!\n", + mddev->clevel); return -EINVAL; } - - mddev->pers = pers[pnum]; + mddev->pers = pers; spin_unlock(&pers_lock); + mddev->level = pers->level; + strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ mddev->barriers_work = 1; + mddev->ok_start_degraded = start_dirty_degraded; if (start_readonly) mddev->ro = 2; /* read-only, but switch on first write */ - /* before we start the array running, initialise the bitmap */ - err = bitmap_create(mddev); - if (err) - printk(KERN_ERR "%s: failed to create bitmap (%d)\n", - mdname(mddev), err); - else - err = mddev->pers->run(mddev); + err = mddev->pers->run(mddev); + if (!err && mddev->pers->sync_request) { + err = bitmap_create(mddev); + if (err) { + printk(KERN_ERR "%s: failed to create bitmap (%d)\n", + mdname(mddev), err); + mddev->pers->stop(mddev); + } + } if (err) { printk(KERN_ERR "md: pers->run() failed ...\n"); module_put(mddev->pers->owner); @@ -2104,6 +2590,7 @@ static int do_md_run(mddev_t * mddev) mddev->queue->make_request_fn = mddev->pers->make_request; mddev->changed = 1; + md_new_event(mddev); return 0; } @@ -2231,6 +2718,7 @@ static int do_md_stop(mddev_t * mddev, int ro) printk(KERN_INFO "md: %s switched to read-only mode.\n", mdname(mddev)); err = 0; + md_new_event(mddev); out: return err; } @@ -2668,12 +3156,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) if (info->state & (1<<MD_DISK_WRITEMOSTLY)) set_bit(WriteMostly, &rdev->flags); - err = bind_rdev_to_array(rdev, mddev); - if (err) { - export_rdev(rdev); - return err; - } - if (!mddev->persistent) { printk(KERN_INFO "md: nonpersistent superblock ...\n"); rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; @@ -2681,8 +3163,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) rdev->sb_offset = calc_dev_sboffset(rdev->bdev); rdev->size = calc_dev_size(rdev, mddev->chunk_size); - if (!mddev->size || (mddev->size > rdev->size)) - mddev->size = rdev->size; + err = bind_rdev_to_array(rdev, mddev); + if (err) { + export_rdev(rdev); + return err; + } } return 0; @@ -2705,6 +3190,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev) kick_rdev_from_array(rdev); md_update_sb(mddev); + md_new_event(mddev); return 0; busy: @@ -2753,15 +3239,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) size = calc_dev_size(rdev, mddev->chunk_size); 
rdev->size = size; - if (size < mddev->size) { - printk(KERN_WARNING - "%s: disk size %llu blocks < array size %llu\n", - mdname(mddev), (unsigned long long)size, - (unsigned long long)mddev->size); - err = -ENOSPC; - goto abort_export; - } - if (test_bit(Faulty, &rdev->flags)) { printk(KERN_WARNING "md: can not hot-add faulty %s disk to %s!\n", @@ -2771,7 +3248,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) } clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; - bind_rdev_to_array(rdev, mddev); + err = bind_rdev_to_array(rdev, mddev); + if (err) + goto abort_export; /* * The rest should better be atomic, we can have disk failures @@ -2795,7 +3274,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); - + md_new_event(mddev); return 0; abort_unbind_export: @@ -2942,6 +3421,81 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) return 0; } +static int update_size(mddev_t *mddev, unsigned long size) +{ + mdk_rdev_t * rdev; + int rv; + struct list_head *tmp; + + if (mddev->pers->resize == NULL) + return -EINVAL; + /* The "size" is the amount of each device that is used. + * This can only make sense for arrays with redundancy. + * linear and raid0 always use whatever space is available + * We can only consider changing the size if no resync + * or reconstruction is happening, and if the new size + * is acceptable. It must fit before the sb_offset or, + * if that is <data_offset, it must fit before the + * size of each device. + * If size is zero, we find the largest size that fits. + */ + if (mddev->sync_thread) + return -EBUSY; + ITERATE_RDEV(mddev,rdev,tmp) { + sector_t avail; + int fit = (size == 0); + if (rdev->sb_offset > rdev->data_offset) + avail = (rdev->sb_offset*2) - rdev->data_offset; + else + avail = get_capacity(rdev->bdev->bd_disk) + - rdev->data_offset; + if (fit && (size == 0 || size > avail/2)) + size = avail/2; + if (avail < ((sector_t)size << 1)) + return -ENOSPC; + } + rv = mddev->pers->resize(mddev, (sector_t)size *2); + if (!rv) { + struct block_device *bdev; + + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + down(&bdev->bd_inode->i_sem); + i_size_write(bdev->bd_inode, mddev->array_size << 10); + up(&bdev->bd_inode->i_sem); + bdput(bdev); + } + } + return rv; +} + +static int update_raid_disks(mddev_t *mddev, int raid_disks) +{ + int rv; + /* change the number of raid disks */ + if (mddev->pers->reshape == NULL) + return -EINVAL; + if (raid_disks <= 0 || + raid_disks >= mddev->max_disks) + return -EINVAL; + if (mddev->sync_thread) + return -EBUSY; + rv = mddev->pers->reshape(mddev, raid_disks); + if (!rv) { + struct block_device *bdev; + + bdev = bdget_disk(mddev->gendisk, 0); + if (bdev) { + down(&bdev->bd_inode->i_sem); + i_size_write(bdev->bd_inode, mddev->array_size << 10); + up(&bdev->bd_inode->i_sem); + bdput(bdev); + } + } + return rv; +} + + /* * update_array_info is used to change the configuration of an * on-line array. @@ -2990,71 +3544,12 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) else return mddev->pers->reconfig(mddev, info->layout, -1); } - if (mddev->size != info->size) { - mdk_rdev_t * rdev; - struct list_head *tmp; - if (mddev->pers->resize == NULL) - return -EINVAL; - /* The "size" is the amount of each device that is used. - * This can only make sense for arrays with redundancy. 
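The per-device space rule in update_size() above has one subtlety: rdev->sb_offset is kept in KiB while data_offset and the device capacity are in sectors, which is why the superblock-at-the-end branch doubles sb_offset before subtracting. A standalone rendering of that computation with invented geometry:

#include <stdio.h>

typedef unsigned long long sector_t;

/* mirrors the avail calculation in update_size(); sb_offset is in KiB,
 * data_offset and capacity are in 512-byte sectors */
sector_t avail_sectors(sector_t sb_offset, sector_t data_offset,
		       sector_t capacity)
{
	if (sb_offset > data_offset)
		return sb_offset * 2 - data_offset; /* data must end below the sb */
	else
		return capacity - data_offset;      /* sb sits before the data */
}

int main(void)
{
	/* superblock near the end of the device (0.90-style metadata) */
	printf("%llu sectors usable\n", avail_sectors(488386496, 0, 976773168));
	/* superblock at the front, data offset past it */
	printf("%llu sectors usable\n", avail_sectors(8, 2048, 976773168));
	return 0;
}

update_size() then halves avail to get a per-device size in KiB, and any member for which avail is less than size*2 fails the resize with -ENOSPC.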
- * linear and raid0 always use whatever space is available - * We can only consider changing the size if no resync - * or reconstruction is happening, and if the new size - * is acceptable. It must fit before the sb_offset or, - * if that is <data_offset, it must fit before the - * size of each device. - * If size is zero, we find the largest size that fits. - */ - if (mddev->sync_thread) - return -EBUSY; - ITERATE_RDEV(mddev,rdev,tmp) { - sector_t avail; - int fit = (info->size == 0); - if (rdev->sb_offset > rdev->data_offset) - avail = (rdev->sb_offset*2) - rdev->data_offset; - else - avail = get_capacity(rdev->bdev->bd_disk) - - rdev->data_offset; - if (fit && (info->size == 0 || info->size > avail/2)) - info->size = avail/2; - if (avail < ((sector_t)info->size << 1)) - return -ENOSPC; - } - rv = mddev->pers->resize(mddev, (sector_t)info->size *2); - if (!rv) { - struct block_device *bdev; - - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - down(&bdev->bd_inode->i_sem); - i_size_write(bdev->bd_inode, mddev->array_size << 10); - up(&bdev->bd_inode->i_sem); - bdput(bdev); - } - } - } - if (mddev->raid_disks != info->raid_disks) { - /* change the number of raid disks */ - if (mddev->pers->reshape == NULL) - return -EINVAL; - if (info->raid_disks <= 0 || - info->raid_disks >= mddev->max_disks) - return -EINVAL; - if (mddev->sync_thread) - return -EBUSY; - rv = mddev->pers->reshape(mddev, info->raid_disks); - if (!rv) { - struct block_device *bdev; - - bdev = bdget_disk(mddev->gendisk, 0); - if (bdev) { - down(&bdev->bd_inode->i_sem); - i_size_write(bdev->bd_inode, mddev->array_size << 10); - up(&bdev->bd_inode->i_sem); - bdput(bdev); - } - } - } + if (mddev->size != info->size) + rv = update_size(mddev, info->size); + + if (mddev->raid_disks != info->raid_disks) + rv = update_raid_disks(mddev, info->raid_disks); + if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { if (mddev->pers->quiesce == NULL) return -EINVAL; @@ -3476,11 +3971,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, { mdk_thread_t *thread; - thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); + thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); if (!thread) return NULL; - memset(thread, 0, sizeof(mdk_thread_t)); init_waitqueue_head(&thread->wqueue); thread->run = run; @@ -3524,6 +4018,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); + md_new_event(mddev); } /* seq_file implementation /proc/mdstat */ @@ -3664,24 +4159,29 @@ static void md_seq_stop(struct seq_file *seq, void *v) mddev_put(mddev); } +struct mdstat_info { + int event; +}; + static int md_seq_show(struct seq_file *seq, void *v) { mddev_t *mddev = v; sector_t size; struct list_head *tmp2; mdk_rdev_t *rdev; - int i; + struct mdstat_info *mi = seq->private; struct bitmap *bitmap; if (v == (void*)1) { + struct mdk_personality *pers; seq_printf(seq, "Personalities : "); spin_lock(&pers_lock); - for (i = 0; i < MAX_PERSONALITY; i++) - if (pers[i]) - seq_printf(seq, "[%s] ", pers[i]->name); + list_for_each_entry(pers, &pers_list, list) + seq_printf(seq, "[%s] ", pers->name); spin_unlock(&pers_lock); seq_printf(seq, "\n"); + mi->event = atomic_read(&md_event_count); return 0; } if (v == (void*)2) { @@ -3790,47 +4290,68 @@ static struct seq_operations md_seq_ops = { static int md_seq_open(struct inode *inode, struct file *file) { int error; + struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); + 
if (mi == NULL) + return -ENOMEM; error = seq_open(file, &md_seq_ops); + if (error) + kfree(mi); + else { + struct seq_file *p = file->private_data; + p->private = mi; + mi->event = atomic_read(&md_event_count); + } return error; } +static int md_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct mdstat_info *mi = m->private; + m->private = NULL; + kfree(mi); + return seq_release(inode, file); +} + +static unsigned int mdstat_poll(struct file *filp, poll_table *wait) +{ + struct seq_file *m = filp->private_data; + struct mdstat_info *mi = m->private; + int mask; + + poll_wait(filp, &md_event_waiters, wait); + + /* always allow read */ + mask = POLLIN | POLLRDNORM; + + if (mi->event != atomic_read(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; +} + static struct file_operations md_seq_fops = { .open = md_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = md_seq_release, + .poll = mdstat_poll, }; -int register_md_personality(int pnum, mdk_personality_t *p) +int register_md_personality(struct mdk_personality *p) { - if (pnum >= MAX_PERSONALITY) { - printk(KERN_ERR - "md: tried to install personality %s as nr %d, but max is %lu\n", - p->name, pnum, MAX_PERSONALITY-1); - return -EINVAL; - } - spin_lock(&pers_lock); - if (pers[pnum]) { - spin_unlock(&pers_lock); - return -EBUSY; - } - - pers[pnum] = p; - printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum); + list_add_tail(&p->list, &pers_list); + printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); spin_unlock(&pers_lock); return 0; } -int unregister_md_personality(int pnum) +int unregister_md_personality(struct mdk_personality *p) { - if (pnum >= MAX_PERSONALITY) - return -EINVAL; - - printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name); + printk(KERN_INFO "md: %s personality unregistered\n", p->name); spin_lock(&pers_lock); - pers[pnum] = NULL; + list_del_init(&p->list); spin_unlock(&pers_lock); return 0; } @@ -4012,10 +4533,10 @@ static void md_do_sync(mddev_t *mddev) printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" - " %d KB/sec/disc.\n", sysctl_speed_limit_min); + " %d KB/sec/disc.\n", speed_min(mddev)); printk(KERN_INFO "md: using maximum available idle IO bandwidth " "(but not more than %d KB/sec) for reconstruction.\n", - sysctl_speed_limit_max); + speed_max(mddev)); is_mddev_idle(mddev); /* this also initializes IO event counters */ /* we don't use the checkpoint if there's a bitmap */ @@ -4056,7 +4577,7 @@ static void md_do_sync(mddev_t *mddev) skipped = 0; sectors = mddev->pers->sync_request(mddev, j, &skipped, - currspeed < sysctl_speed_limit_min); + currspeed < speed_min(mddev)); if (sectors == 0) { set_bit(MD_RECOVERY_ERR, &mddev->recovery); goto out; @@ -4069,7 +4590,11 @@ static void md_do_sync(mddev_t *mddev) j += sectors; if (j>1) mddev->curr_resync = j; - + if (last_check == 0) + /* this is the earliest that the rebuild will be + * visible in /proc/mdstat + */ + md_new_event(mddev); if (last_check + window > io_sectors || j == max_sectors) continue; @@ -4117,8 +4642,8 @@ static void md_do_sync(mddev_t *mddev) currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 /((jiffies-mddev->resync_mark)/HZ +1) +1; - if (currspeed > sysctl_speed_limit_min) { - if ((currspeed > sysctl_speed_limit_max) || + if (currspeed > speed_min(mddev)) { + if ((currspeed > 
speed_max(mddev)) || !is_mddev_idle(mddev)) { msleep(500); goto repeat; @@ -4255,6 +4780,7 @@ void md_check_recovery(mddev_t *mddev) mddev->recovery = 0; /* flag recovery needed just to double check */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_new_event(mddev); goto unlock; } /* Clear some bits that don't mean anything, but @@ -4292,6 +4818,7 @@ void md_check_recovery(mddev_t *mddev) sprintf(nm, "rd%d", rdev->raid_disk); sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); spares++; + md_new_event(mddev); } else break; } @@ -4324,9 +4851,9 @@ void md_check_recovery(mddev_t *mddev) mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ mddev->recovery = 0; - } else { + } else md_wakeup_thread(mddev->sync_thread); - } + md_new_event(mddev); } unlock: mddev_unlock(mddev); @@ -4503,12 +5030,14 @@ static int set_ro(const char *val, struct kernel_param *kp) int num = simple_strtoul(val, &e, 10); if (*val && (*e == '\0' || *e == '\n')) { start_readonly = num; - return 0;; + return 0; } return -EINVAL; } module_param_call(start_ro, set_ro, get_ro, NULL, 0600); +module_param(start_dirty_degraded, int, 0644); + EXPORT_SYMBOL(register_md_personality); EXPORT_SYMBOL(unregister_md_personality); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 145cdc5..e6aa309 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -35,15 +35,10 @@ #define NR_RESERVED_BUFS 32 -static mdk_personality_t multipath_personality; - - static void *mp_pool_alloc(gfp_t gfp_flags, void *data) { struct multipath_bh *mpb; - mpb = kmalloc(sizeof(*mpb), gfp_flags); - if (mpb) - memset(mpb, 0, sizeof(*mpb)); + mpb = kzalloc(sizeof(*mpb), gfp_flags); return mpb; } @@ -444,7 +439,7 @@ static int multipath_run (mddev_t *mddev) * should be freed in multipath_stop()] */ - conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) { printk(KERN_ERR @@ -452,9 +447,8 @@ static int multipath_run (mddev_t *mddev) mdname(mddev)); goto out; } - memset(conf, 0, sizeof(*conf)); - conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks, + conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->multipaths) { printk(KERN_ERR @@ -462,7 +456,6 @@ static int multipath_run (mddev_t *mddev) mdname(mddev)); goto out_free_conf; } - memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks); conf->working_disks = 0; ITERATE_RDEV(mddev,rdev,tmp) { @@ -557,9 +550,10 @@ static int multipath_stop (mddev_t *mddev) return 0; } -static mdk_personality_t multipath_personality= +static struct mdk_personality multipath_personality = { .name = "multipath", + .level = LEVEL_MULTIPATH, .owner = THIS_MODULE, .make_request = multipath_make_request, .run = multipath_run, @@ -572,15 +566,17 @@ static mdk_personality_t multipath_personality= static int __init multipath_init (void) { - return register_md_personality (MULTIPATH, &multipath_personality); + return register_md_personality (&multipath_personality); } static void __exit multipath_exit (void) { - unregister_md_personality (MULTIPATH); + unregister_md_personality (&multipath_personality); } module_init(multipath_init); module_exit(multipath_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ +MODULE_ALIAS("md-multipath"); +MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index fece327..abbca15 100644 --- a/drivers/md/raid0.c +++ 
b/drivers/md/raid0.c @@ -113,21 +113,16 @@ static int create_strip_zones (mddev_t *mddev) } printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); - conf->strip_zone = kmalloc(sizeof(struct strip_zone)* + conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); if (!conf->strip_zone) return 1; - conf->devlist = kmalloc(sizeof(mdk_rdev_t*)* + conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->nr_strip_zones*mddev->raid_disks, GFP_KERNEL); if (!conf->devlist) return 1; - memset(conf->strip_zone, 0,sizeof(struct strip_zone)* - conf->nr_strip_zones); - memset(conf->devlist, 0, - sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks); - /* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to devices and find them all */ @@ -280,7 +275,11 @@ static int raid0_run (mddev_t *mddev) mdk_rdev_t *rdev; struct list_head *tmp; - printk("%s: setting max_sectors to %d, segment boundary to %d\n", + if (mddev->chunk_size == 0) { + printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); + return -EINVAL; + } + printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", mdname(mddev), mddev->chunk_size >> 9, (mddev->chunk_size>>1)-1); @@ -361,7 +360,7 @@ static int raid0_run (mddev_t *mddev) * chunksize should be used in that case. */ { - int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; + int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; } @@ -512,9 +511,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev) return; } -static mdk_personality_t raid0_personality= +static struct mdk_personality raid0_personality= { .name = "raid0", + .level = 0, .owner = THIS_MODULE, .make_request = raid0_make_request, .run = raid0_run, @@ -524,15 +524,17 @@ static mdk_personality_t raid0_personality= static int __init raid0_init (void) { - return register_md_personality (RAID0, &raid0_personality); + return register_md_personality (&raid0_personality); } static void raid0_exit (void) { - unregister_md_personality (RAID0); + unregister_md_personality (&raid0_personality); } module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ +MODULE_ALIAS("md-raid0"); +MODULE_ALIAS("md-level-0"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 229d7b2..a06ff91 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -47,10 +47,11 @@ */ #define NR_RAID1_BIOS 256 -static mdk_personality_t raid1_personality; static void unplug_slaves(mddev_t *mddev); +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) { @@ -59,10 +60,8 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) int size = offsetof(r1bio_t, bios[pi->raid_disks]); /* allocate a r1bio with room for raid_disks entries in the bios array */ - r1_bio = kmalloc(size, gfp_flags); - if (r1_bio) - memset(r1_bio, 0, size); - else + r1_bio = kzalloc(size, gfp_flags); + if (!r1_bio) unplug_slaves(pi->mddev); return r1_bio; @@ -104,15 +103,30 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) } /* * Allocate RESYNC_PAGES data pages and attach them to - * the first bio; + * the first bio. + * If this is a user-requested check/repair, allocate + * RESYNC_PAGES for each bio. 
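The buffer policy just described can be modelled with plain malloc(): an ordinary resync lets every bio alias the pages owned by bios[0], while a user-requested check/repair gives each bio private pages so the copies can be compared afterwards. The free side must then release each page exactly once, which is the same alias test r1buf_pool_free() uses below. A toy model, not kernel code:

#include <stdlib.h>
#include <string.h>

#define RESYNC_PAGES 4

struct toy_bio {
	void *page[RESYNC_PAGES];
};

int alloc_r1buf(struct toy_bio *bios, int raid_disks, int user_requested)
{
	int owners = user_requested ? raid_disks : 1;
	int i, j;

	for (j = 0; j < owners; j++)
		for (i = 0; i < RESYNC_PAGES; i++)
			if (!(bios[j].page[i] = malloc(4096)))
				return -1;   /* the real code unwinds; omitted */

	if (!user_requested)                 /* everyone shares bios[0]'s pages */
		for (j = 1; j < raid_disks; j++)
			memcpy(bios[j].page, bios[0].page, sizeof(bios[0].page));
	return 0;
}

void free_r1buf(struct toy_bio *bios, int raid_disks)
{
	int i, j;

	/* free each page once: bios[0] always owns its copy, the others
	 * own theirs only if it is not an alias of bios[0]'s */
	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = raid_disks; j--;)
			if (j == 0 || bios[j].page[i] != bios[0].page[i])
				free(bios[j].page[i]);
}

int main(void)
{
	struct toy_bio bios[3];

	memset(bios, 0, sizeof(bios));
	if (alloc_r1buf(bios, 3, 1) == 0)    /* check/repair: private pages */
		free_r1buf(bios, 3);
	memset(bios, 0, sizeof(bios));
	if (alloc_r1buf(bios, 3, 0) == 0)    /* plain resync: shared pages */
		free_r1buf(bios, 3);
	return 0;
}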
*/ - bio = r1_bio->bios[0]; - for (i = 0; i < RESYNC_PAGES; i++) { - page = alloc_page(gfp_flags); - if (unlikely(!page)) - goto out_free_pages; - - bio->bi_io_vec[i].bv_page = page; + if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) + j = pi->raid_disks; + else + j = 1; + while(j--) { + bio = r1_bio->bios[j]; + for (i = 0; i < RESYNC_PAGES; i++) { + page = alloc_page(gfp_flags); + if (unlikely(!page)) + goto out_free_pages; + + bio->bi_io_vec[i].bv_page = page; + } + } + /* If not a user-requested check/repair, copy the page pointers to all bios */ + if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { + for (i=0; i<RESYNC_PAGES ; i++) + for (j=1; j<pi->raid_disks; j++) + r1_bio->bios[j]->bi_io_vec[i].bv_page = + r1_bio->bios[0]->bi_io_vec[i].bv_page; } r1_bio->master_bio = NULL; @@ -120,8 +134,10 @@ return r1_bio; out_free_pages: - for ( ; i > 0 ; i--) - __free_page(bio->bi_io_vec[i-1].bv_page); + for (i=0; i < RESYNC_PAGES ; i++) + for (j=0 ; j < pi->raid_disks; j++) + safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); + j = -1; out_free_bio: while ( ++j < pi->raid_disks ) bio_put(r1_bio->bios[j]); @@ -132,14 +148,16 @@ out_free_bio: static void r1buf_pool_free(void *__r1_bio, void *data) { struct pool_info *pi = data; - int i; + int i,j; r1bio_t *r1bio = __r1_bio; - struct bio *bio = r1bio->bios[0]; - for (i = 0; i < RESYNC_PAGES; i++) { - __free_page(bio->bi_io_vec[i].bv_page); - bio->bi_io_vec[i].bv_page = NULL; - } + for (i = 0; i < RESYNC_PAGES; i++) + for (j = pi->raid_disks; j-- ;) { + if (j == 0 || + r1bio->bios[j]->bi_io_vec[i].bv_page != + r1bio->bios[0]->bi_io_vec[i].bv_page) + safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); + } for (i=0 ; i < pi->raid_disks; i++) bio_put(r1bio->bios[i]); @@ -152,7 +170,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) for (i = 0; i < conf->raid_disks; i++) { struct bio **bio = r1_bio->bios + i; - if (*bio) + if (*bio && *bio != IO_BLOCKED) bio_put(*bio); *bio = NULL; } @@ -160,20 +178,13 @@ static inline void free_r1bio(r1bio_t *r1_bio) { - unsigned long flags; - conf_t *conf = mddev_to_conf(r1_bio->mddev); /* * Wake up any possible resync thread that waits for the device * to go idle. 
- spin_lock_irqsave(&conf->resync_lock, flags); - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + allow_barrier(conf); put_all_bios(conf, r1_bio); mempool_free(r1_bio, conf->r1bio_pool); @@ -182,22 +193,17 @@ static inline void free_r1bio(r1bio_t *r1_bio) static inline void put_buf(r1bio_t *r1_bio) { conf_t *conf = mddev_to_conf(r1_bio->mddev); - unsigned long flags; + int i; - mempool_free(r1_bio, conf->r1buf_pool); + for (i=0; i<conf->raid_disks; i++) { + struct bio *bio = r1_bio->bios[i]; + if (bio->bi_end_io) + rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); + } - spin_lock_irqsave(&conf->resync_lock, flags); - if (!conf->barrier) - BUG(); - --conf->barrier; - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); + mempool_free(r1_bio, conf->r1buf_pool); - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + lower_barrier(conf); } static void reschedule_retry(r1bio_t *r1_bio) @@ -208,8 +214,10 @@ static void reschedule_retry(r1bio_t *r1_bio) spin_lock_irqsave(&conf->device_lock, flags); list_add(&r1_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; spin_unlock_irqrestore(&conf->device_lock, flags); + wake_up(&conf->wait_barrier); md_wakeup_thread(mddev->thread); } @@ -261,9 +269,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - else + update_head_pos(mirror, r1_bio); + + if (uptodate || conf->working_disks <= 1) { /* * Set R1BIO_Uptodate in our master bio, so that * we will return a good error code to the higher @@ -273,16 +281,11 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int * user-side. So if something waits for IO, then it will * wait for the 'master' bio.
*/ - set_bit(R1BIO_Uptodate, &r1_bio->state); - - update_head_pos(mirror, r1_bio); + if (uptodate) + set_bit(R1BIO_Uptodate, &r1_bio->state); - /* - * we have only one bio on the read side - */ - if (uptodate) raid_end_bio_io(r1_bio); - else { + } else { /* * oops, read error: */ @@ -378,7 +381,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int /* free extra copy of the data pages */ int i = bio->bi_vcnt; while (i--) - __free_page(bio->bi_io_vec[i].bv_page); + safe_put_page(bio->bi_io_vec[i].bv_page); } /* clear the bitmap if all writes complete successfully */ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, @@ -433,11 +436,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) new_disk = 0; for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { - if (rdev && test_bit(In_sync, &rdev->flags)) + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) wonly_disk = new_disk; if (new_disk == conf->raid_disks - 1) { @@ -451,11 +456,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* make sure the disk is operational */ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); + r1_bio->bios[new_disk] == IO_BLOCKED || !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { - if (rdev && test_bit(In_sync, &rdev->flags)) + if (rdev && test_bit(In_sync, &rdev->flags) && + r1_bio->bios[new_disk] != IO_BLOCKED) wonly_disk = new_disk; if (new_disk <= 0) @@ -492,7 +499,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) rdev = rcu_dereference(conf->mirrors[disk].rdev); - if (!rdev || + if (!rdev || r1_bio->bios[disk] == IO_BLOCKED || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags)) continue; @@ -520,7 +527,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* cannot risk returning a device that failed * before we inc'ed nr_pending */ - atomic_dec(&rdev->nr_pending); + rdev_dec_pending(rdev, conf->mddev); goto retry; } conf->next_seq_sect = this_sector + sectors; @@ -593,42 +600,119 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, return ret; } -/* - * Throttle resync depth, so that we can both get proper overlapping of - * requests, but are still able to handle normal requests quickly. +/* Barriers.... + * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. When that returns there + * is no background IO happening. It must arrange to call + * allow_barrier when it has finished its IO. + * Background IO calls must call raise_barrier. Once that returns + * there is no normal IO happening. It must arrange to call + * lower_barrier when the particular background IO completes. */
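The counting discipline described in the comment above can be modelled outside the kernel. The sketch below is a user-space rendering with pthreads: one condition variable stands in for wait_barrier, wake_up becomes a broadcast, and queue unplugging and the RESYNC_DEPTH throttle are left out. All demo_ names are invented; this illustrates the idea and is not the driver code:

#include <pthread.h>

struct demo_conf {
        pthread_mutex_t lock;
        pthread_cond_t  wait_barrier;
        int barrier;            /* background activities in progress */
        int nr_waiting;         /* regular IO blocked on the barrier */
        int nr_pending;         /* regular IO in flight */
};

static void demo_raise_barrier(struct demo_conf *conf)
{
        pthread_mutex_lock(&conf->lock);
        /* wait until no regular IO is waiting for the barrier to drop */
        while (conf->nr_waiting)
                pthread_cond_wait(&conf->wait_barrier, &conf->lock);
        conf->barrier++;                /* block any new regular IO */
        while (conf->nr_pending)        /* now wait for in-flight IO */
                pthread_cond_wait(&conf->wait_barrier, &conf->lock);
        pthread_mutex_unlock(&conf->lock);
}

static void demo_lower_barrier(struct demo_conf *conf)
{
        pthread_mutex_lock(&conf->lock);
        conf->barrier--;
        pthread_mutex_unlock(&conf->lock);
        pthread_cond_broadcast(&conf->wait_barrier);
}

static void demo_wait_barrier(struct demo_conf *conf)  /* regular IO entry */
{
        pthread_mutex_lock(&conf->lock);
        if (conf->barrier) {
                conf->nr_waiting++;
                while (conf->barrier)
                        pthread_cond_wait(&conf->wait_barrier, &conf->lock);
                conf->nr_waiting--;
        }
        conf->nr_pending++;
        pthread_mutex_unlock(&conf->lock);
}

static void demo_allow_barrier(struct demo_conf *conf) /* regular IO exit */
{
        pthread_mutex_lock(&conf->lock);
        conf->nr_pending--;
        pthread_mutex_unlock(&conf->lock);
        pthread_cond_broadcast(&conf->wait_barrier);
}

int main(void)
{
        struct demo_conf conf = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait_barrier = PTHREAD_COND_INITIALIZER,
        };
        demo_raise_barrier(&conf);      /* background work begins... */
        demo_lower_barrier(&conf);      /* ...and finishes */
        demo_wait_barrier(&conf);       /* a regular IO starts... */
        demo_allow_barrier(&conf);      /* ...and completes */
        return 0;
}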
#define RESYNC_DEPTH 32 -static void device_barrier(conf_t *conf, sector_t sect) +static void raise_barrier(conf_t *conf) { spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - - if (!conf->barrier++) { - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - if (conf->nr_pending) - BUG(); + + /* Wait until no block IO is waiting */ + wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* Now wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) +{ + spin_lock_irq(&conf->resync_lock); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + conf->nr_waiting--; } - wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - conf->next_resync = sect; + conf->nr_pending++; + spin_unlock_irq(&conf->resync_lock); +} + +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quiet.
+ * We increment barrier and nr_waiting, and then + * wait until barrier+nr_pending match nr_queued+2 + */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->resync_lock, + raid1_unplug(conf->mddev->queue)); + spin_unlock_irq(&conf->resync_lock); +} +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); spin_unlock_irq(&conf->resync_lock); } + /* duplicate the data pages for behind I/O */ static struct page **alloc_behind_pages(struct bio *bio) { int i; struct bio_vec *bvec; - struct page **pages = kmalloc(bio->bi_vcnt * sizeof(struct page *), + struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *), GFP_NOIO); if (unlikely(!pages)) goto do_sync_io; - memset(pages, 0, bio->bi_vcnt * sizeof(struct page *)); - bio_for_each_segment(bvec, bio, i) { pages[i] = alloc_page(GFP_NOIO); if (unlikely(!pages[i])) @@ -644,7 +728,7 @@ static struct page **alloc_behind_pages(struct bio *bio) do_sync_io: if (pages) for (i = 0; i < bio->bi_vcnt && pages[i]; i++) - __free_page(pages[i]); + put_page(pages[i]); kfree(pages); PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); return NULL; @@ -678,10 +762,7 @@ static int make_request(request_queue_t *q, struct bio * bio) */ md_write_start(mddev, bio); /* wait on superblock update early */ - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, ); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + wait_barrier(conf); disk_stat_inc(mddev->gendisk, ios[rw]); disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); @@ -749,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); if (test_bit(Faulty, &rdev->flags)) { - atomic_dec(&rdev->nr_pending); + rdev_dec_pending(rdev, mddev); r1_bio->bios[i] = NULL; } else r1_bio->bios[i] = bio; @@ -909,13 +990,8 @@ static void print_conf(conf_t *conf) static void close_sync(conf_t *conf) { - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, - conf->resync_lock, raid1_unplug(conf->mddev->queue)); - spin_unlock_irq(&conf->resync_lock); - - if (conf->barrier) BUG(); - if (waitqueue_active(&conf->wait_idle)) BUG(); + wait_barrier(conf); + allow_barrier(conf); mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; @@ -1015,28 +1091,27 @@ abort: static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); - conf_t *conf = mddev_to_conf(r1_bio->mddev); + int i; if (bio->bi_size) return 1; - if (r1_bio->bios[r1_bio->read_disk] != bio) - BUG(); - update_head_pos(r1_bio->read_disk, r1_bio); + for (i=r1_bio->mddev->raid_disks; i--; ) + if (r1_bio->bios[i] == bio) + break; + BUG_ON(i < 0); + update_head_pos(i, r1_bio); /* * we have read a block, now it needs to be re-written, * or re-read if the read failed. 
* We don't do much here, just schedule handling by raid1d */ - if (!uptodate) { - md_error(r1_bio->mddev, - conf->mirrors[r1_bio->read_disk].rdev); - } else + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R1BIO_Uptodate, &r1_bio->state); - rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); - reschedule_retry(r1_bio); + + if (atomic_dec_and_test(&r1_bio->remaining)) + reschedule_retry(r1_bio); return 0; } @@ -1066,7 +1141,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) md_done_sync(mddev, r1_bio->sectors, uptodate); put_buf(r1_bio); } - rdev_dec_pending(conf->mirrors[mirror].rdev, mddev); return 0; } @@ -1079,34 +1153,173 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) bio = r1_bio->bios[r1_bio->read_disk]; -/* - if (r1_bio->sector == 0) printk("First sync write startss\n"); -*/ - /* - * schedule writes - */ + + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We have read all readable devices. If we haven't + * got the block, then there is no hope left. + * If we have, then we want to do a comparison + * and skip the write if everything is the same. + * If any blocks failed to read, then we need to + * attempt an over-write + */ + int primary; + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + for (i=0; i<mddev->raid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read) + md_error(mddev, conf->mirrors[i].rdev); + + md_done_sync(mddev, r1_bio->sectors, 1); + put_buf(r1_bio); + return; + } + for (primary=0; primary<mddev->raid_disks; primary++) + if (r1_bio->bios[primary]->bi_end_io == end_sync_read && + test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { + r1_bio->bios[primary]->bi_end_io = NULL; + rdev_dec_pending(conf->mirrors[primary].rdev, mddev); + break; + } + r1_bio->read_disk = primary; + for (i=0; i<mddev->raid_disks; i++) + if (r1_bio->bios[i]->bi_end_io == end_sync_read && + test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) { + int j; + int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); + struct bio *pbio = r1_bio->bios[primary]; + struct bio *sbio = r1_bio->bios[i]; + for (j = vcnt; j-- ; ) + if (memcmp(page_address(pbio->bi_io_vec[j].bv_page), + page_address(sbio->bi_io_vec[j].bv_page), + PAGE_SIZE)) + break; + if (j >= 0) + mddev->resync_mismatches += r1_bio->sectors; + if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + sbio->bi_end_io = NULL; + rdev_dec_pending(conf->mirrors[i].rdev, mddev); + } else { + /* fixup the bio for reuse */ + sbio->bi_vcnt = vcnt; + sbio->bi_size = r1_bio->sectors << 9; + sbio->bi_idx = 0; + sbio->bi_phys_segments = 0; + sbio->bi_hw_segments = 0; + sbio->bi_hw_front_size = 0; + sbio->bi_hw_back_size = 0; + sbio->bi_flags &= ~(BIO_POOL_MASK - 1); + sbio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bi_next = NULL; + sbio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + sbio->bi_bdev = conf->mirrors[i].rdev->bdev; + } + } + } if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { - /* - * There is no point trying a read-for-reconstruct as - * reconstruct is about to be aborted + /* ouch - failed to read all of that. + * Try some synchronous reads of other devices to get + * good data, much like with normal read errors. Only + * read into the pages we already have so that we don't + * need to re-issue the read request. + * We don't need to freeze the array, because being in an + * active sync request, there is no normal IO, and + * no overlapping syncs.
*/ - char b[BDEVNAME_SIZE]; - printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" - " for block %llu\n", - bdevname(bio->bi_bdev,b), - (unsigned long long)r1_bio->sector); - md_done_sync(mddev, r1_bio->sectors, 0); - put_buf(r1_bio); - return; + sector_t sect = r1_bio->sector; + int sectors = r1_bio->sectors; + int idx = 0; + + while(sectors) { + int s = sectors; + int d = r1_bio->read_disk; + int success = 0; + mdk_rdev_t *rdev; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + do { + if (r1_bio->bios[d]->bi_end_io == end_sync_read) { + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ)) { + success = 1; + break; + } + } + d++; + if (d == conf->raid_disks) + d = 0; + } while (!success && d != r1_bio->read_disk); + + if (success) { + int start = d; + /* write it back and re-read */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + atomic_add(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + WRITE) == 0) + md_error(mddev, rdev); + } + d = start; + while (d != r1_bio->read_disk) { + if (d == 0) + d = conf->raid_disks; + d--; + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + bio->bi_io_vec[idx].bv_page, + READ) == 0) + md_error(mddev, rdev); + } + } else { + char b[BDEVNAME_SIZE]; + /* Cannot read from anywhere, array is toast */ + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" + " for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)r1_bio->sector); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); + return; + } + sectors -= s; + sect += s; + idx ++; + } } + /* + * schedule writes + */ atomic_set(&r1_bio->remaining, 1); for (i = 0; i < disks ; i++) { wbio = r1_bio->bios[i]; - if (wbio->bi_end_io != end_sync_write) + if (wbio->bi_end_io == NULL || + (wbio->bi_end_io == end_sync_read && + (i == r1_bio->read_disk || + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - atomic_inc(&conf->mirrors[i].rdev->nr_pending); + wbio->bi_rw = WRITE; + wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); @@ -1167,6 +1380,7 @@ static void raid1d(mddev_t *mddev) break; r1_bio = list_entry(head->prev, r1bio_t, retry_list); list_del(head->prev); + conf->nr_queued--; spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r1_bio->mddev; @@ -1206,6 +1420,86 @@ static void raid1d(mddev_t *mddev) } } else { int disk; + + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check whether that fixes the read error. + * This is all done synchronously while the array is + * frozen */
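The "+2" in freeze_array()'s wait condition (barrier + nr_pending == nr_queued + 2, in the hunk further up) is easiest to read as a ledger, assuming the barrier was not already raised: one for the barrier freeze_array itself raises, and one for the r1_bio that raid1d has already taken off the retry list but which still counts in nr_pending. A single-threaded toy model of the counters, with no locking and all names invented:

#include <assert.h>
#include <stdio.h>

static int barrier, nr_pending, nr_queued;

static int demo_frozen(void)
{
        return barrier + nr_pending == nr_queued + 2;
}

int main(void)
{
        nr_pending = 3; /* three regular IOs are in flight */

        /* two of them fail and are parked for retry: they stay in
         * nr_pending but are now also counted in nr_queued */
        nr_queued = 2;

        nr_queued--;    /* raid1d takes one off the retry list... */
        barrier++;      /* ...and freeze_array() raises the barrier */

        /* not quiet yet: one unrelated IO is still in flight */
        assert(!demo_frozen());

        nr_pending--;   /* that IO completes (allow_barrier) */

        /* only the parked retry and the bio in hand remain */
        assert(demo_frozen());
        printf("frozen: barrier=%d pending=%d queued=%d\n",
               barrier, nr_pending, nr_queued);
        return 0;
}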
+ sector_t sect = r1_bio->sector; + int sectors = r1_bio->sectors; + freeze_array(conf); + if (mddev->ro == 0) while(sectors) { + int s = sectors; + int d = r1_bio->read_disk; + int success = 0; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + do { + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags) && + sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ)) + success = 1; + else { + d++; + if (d == conf->raid_disks) + d = 0; + } + } while (!success && d != r1_bio->read_disk); + + if (success) { + /* write it back and re-read */ + int start = d; + while (d != r1_bio->read_disk) { + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + atomic_add(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + d = start; + while (d != r1_bio->read_disk) { + if (d==0) + d = conf->raid_disks; + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + } else { + /* Cannot read from anywhere -- bye bye array */ + md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); + break; + } + sectors -= s; + sect += s; + } + + unfreeze_array(conf); + bio = r1_bio->bios[r1_bio->read_disk]; if ((disk=read_balance(conf, r1_bio)) == -1) { printk(KERN_ALERT "raid1: %s: unrecoverable I/O" @@ -1214,7 +1508,8 @@ static void raid1d(mddev_t *mddev) (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { - r1_bio->bios[r1_bio->read_disk] = NULL; + r1_bio->bios[r1_bio->read_disk] = + mddev->ro ? IO_BLOCKED : NULL; r1_bio->read_disk = disk; bio_put(bio); bio = bio_clone(r1_bio->master_bio, GFP_NOIO); @@ -1269,14 +1564,13 @@ static int init_resync(conf_t *conf) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) { conf_t *conf = mddev_to_conf(mddev); - mirror_info_t *mirror; r1bio_t *r1_bio; struct bio *bio; sector_t max_sector, nr_sectors; - int disk; + int disk = -1; int i; - int wonly; - int write_targets = 0; + int wonly = -1; + int write_targets = 0, read_targets = 0; int sync_blocks; int still_degraded = 0; @@ -1317,55 +1611,35 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return sync_blocks; } /* - * If there is non-resync activity waiting for us then - * put in a delay to throttle resync. + * If there is non-resync activity waiting for a turn, + * and resync is going fast enough, + * then let it through before starting on this new sync request. */ - if (!go_faster && waitqueue_active(&conf->wait_resume)) + if (!go_faster && conf->nr_waiting) msleep_interruptible(1000); - device_barrier(conf, sector_nr + RESYNC_SECTORS); - - /* - * If reconstructing, and >1 working disc, - * could dedicate one to rebuild and others to - * service read requests ..
- */ - disk = conf->last_used; - /* make sure disk is operational */ - wonly = disk; - while (conf->mirrors[disk].rdev == NULL || - !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) || - test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags) - ) { - if (conf->mirrors[disk].rdev && - test_bit(In_sync, &conf->mirrors[disk].rdev->flags)) - wonly = disk; - if (disk <= 0) - disk = conf->raid_disks; - disk--; - if (disk == conf->last_used) { - disk = wonly; - break; - } - } - conf->last_used = disk; - atomic_inc(&conf->mirrors[disk].rdev->nr_pending); + raise_barrier(conf); - mirror = conf->mirrors + disk; + conf->next_resync = sector_nr; r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); - - spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + rcu_read_lock(); + /* + * If we get a correctable read error during resync or recovery, + * we might want to read from a different device. So we + * flag all drives that could conceivably be read from for READ, + * and any others (which will be non-In_sync devices) for WRITE. + * If a read fails, we try reading from something else for which READ + * is OK. + */ r1_bio->mddev = mddev; r1_bio->sector = sector_nr; r1_bio->state = 0; set_bit(R1BIO_IsSync, &r1_bio->state); - r1_bio->read_disk = disk; for (i=0; i < conf->raid_disks; i++) { + mdk_rdev_t *rdev; bio = r1_bio->bios[i]; /* take from bio_init */ @@ -1380,35 +1654,49 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio->bi_end_io = NULL; bio->bi_private = NULL; - if (i == disk) { - bio->bi_rw = READ; - bio->bi_end_io = end_sync_read; - } else if (conf->mirrors[i].rdev == NULL || - test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + rdev = rcu_dereference(conf->mirrors[i].rdev); + if (rdev == NULL || + test_bit(Faulty, &rdev->flags)) { still_degraded = 1; continue; - } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || - sector_nr + RESYNC_SECTORS > mddev->recovery_cp || - test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + } else if (!test_bit(In_sync, &rdev->flags)) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; write_targets ++; - } else - /* no need to read or write here */ - continue; - bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset; - bio->bi_bdev = conf->mirrors[i].rdev->bdev; + } else { + /* may need to read from here */ + bio->bi_rw = READ; + bio->bi_end_io = end_sync_read; + if (test_bit(WriteMostly, &rdev->flags)) { + if (wonly < 0) + wonly = i; + } else { + if (disk < 0) + disk = i; + } + read_targets++; + } + atomic_inc(&rdev->nr_pending); + bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; } + rcu_read_unlock(); + if (disk < 0) + disk = wonly; + r1_bio->read_disk = disk; + + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) + /* extra read targets are also write targets */ + write_targets += read_targets-1; - if (write_targets == 0) { + if (write_targets == 0 || read_targets == 0) { /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ sector_t rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); - rdev_dec_pending(conf->mirrors[disk].rdev, mddev); return rv; } @@ -1436,10 +1724,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; i < conf->raid_disks; i++) { bio = r1_bio->bios[i]; if (bio->bi_end_io) { - page = r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page; + page = 
bio->bi_io_vec[bio->bi_vcnt].bv_page; if (bio_add_page(bio, page, len, 0) == 0) { /* stop here */ - r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page = page; + bio->bi_io_vec[bio->bi_vcnt].bv_page = page; while (i > 0) { i--; bio = r1_bio->bios[i]; @@ -1459,12 +1747,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i sync_blocks -= (len>>9); } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); bio_full: - bio = r1_bio->bios[disk]; r1_bio->sectors = nr_sectors; - md_sync_acct(mirror->rdev->bdev, nr_sectors); + /* For a user-requested sync, we read all readable devices and do a + * compare + */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + atomic_set(&r1_bio->remaining, read_targets); + for (i=0; i<conf->raid_disks; i++) { + bio = r1_bio->bios[i]; + if (bio->bi_end_io == end_sync_read) { + md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors); + generic_make_request(bio); + } + } + } else { + atomic_set(&r1_bio->remaining, 1); + bio = r1_bio->bios[r1_bio->read_disk]; + md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev, + nr_sectors); + generic_make_request(bio); - generic_make_request(bio); + } return nr_sectors; } @@ -1487,18 +1791,19 @@ static int run(mddev_t *mddev) * bookkeeping area. [whatever we allocate in run(), * should be freed in stop()] */ - conf = kmalloc(sizeof(conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) goto out_no_mem; - memset(conf, 0, sizeof(*conf)); - conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks, + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->mirrors) goto out_no_mem; - memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_no_mem; conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); if (!conf->poolinfo) @@ -1542,8 +1847,7 @@ static int run(mddev_t *mddev) mddev->recovery_cp = MaxSector; spin_lock_init(&conf->resync_lock); - init_waitqueue_head(&conf->wait_idle); - init_waitqueue_head(&conf->wait_resume); + init_waitqueue_head(&conf->wait_barrier); bio_list_init(&conf->pending_bio_list); bio_list_init(&conf->flushing_bio_list); @@ -1583,7 +1887,6 @@ static int run(mddev_t *mddev) mdname(mddev)); goto out_free_conf; } - if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; printk(KERN_INFO "raid1: raid set %s active with %d out of %d mirrors\n", @@ -1608,6 +1911,7 @@ out_free_conf: if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); + safe_put_page(conf->tmppage); kfree(conf->poolinfo); kfree(conf); mddev->private = NULL; @@ -1706,19 +2010,14 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kmalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); + newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); mempool_destroy(newpool); return -ENOMEM; } - memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks); - spin_lock_irq(&conf->resync_lock); - conf->barrier++; - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(mddev->queue)); - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf); /* ok, everything is stopped */ oldpool = conf->r1bio_pool; @@ -1738,12 +2037,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) conf->raid_disks = mddev->raid_disks = 
raid_disks; conf->last_used = 0; /* just make sure it is in-range */ - spin_lock_irq(&conf->resync_lock); - conf->barrier--; - spin_unlock_irq(&conf->resync_lock); - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); - + lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -1758,33 +2052,19 @@ static void raid1_quiesce(mddev_t *mddev, int state) switch(state) { case 1: - spin_lock_irq(&conf->resync_lock); - conf->barrier++; - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, raid1_unplug(mddev->queue)); - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf); break; case 0: - spin_lock_irq(&conf->resync_lock); - conf->barrier--; - spin_unlock_irq(&conf->resync_lock); - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); + lower_barrier(conf); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } -static mdk_personality_t raid1_personality = +static struct mdk_personality raid1_personality = { .name = "raid1", + .level = 1, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -1802,15 +2082,17 @@ static mdk_personality_t raid1_personality = static int __init raid_init(void) { - return register_md_personality(RAID1, &raid1_personality); + return register_md_personality(&raid1_personality); } static void raid_exit(void) { - unregister_md_personality(RAID1); + unregister_md_personality(&raid1_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-3"); /* RAID1 */ +MODULE_ALIAS("md-raid1"); +MODULE_ALIAS("md-level-1"); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 713dc9c..9e658e5 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -18,7 +18,9 @@ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include "dm-bio-list.h" #include <linux/raid/raid10.h> +#include <linux/raid/bitmap.h> /* * RAID10 provides a combination of RAID0 and RAID1 functionality. 
@@ -47,6 +49,9 @@ static void unplug_slaves(mddev_t *mddev); +static void allow_barrier(conf_t *conf); +static void lower_barrier(conf_t *conf); + static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) { conf_t *conf = data; @@ -54,10 +59,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) int size = offsetof(struct r10bio_s, devs[conf->copies]); /* allocate a r10bio with room for raid_disks entries in the bios array */ - r10_bio = kmalloc(size, gfp_flags); - if (r10_bio) - memset(r10_bio, 0, size); - else + r10_bio = kzalloc(size, gfp_flags); + if (!r10_bio) unplug_slaves(conf->mddev); return r10_bio; @@ -129,10 +132,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: for ( ; i > 0 ; i--) - __free_page(bio->bi_io_vec[i-1].bv_page); + safe_put_page(bio->bi_io_vec[i-1].bv_page); while (j--) for (i = 0; i < RESYNC_PAGES ; i++) - __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); + safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); j = -1; out_free_bio: while ( ++j < nalloc ) @@ -152,7 +155,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data) struct bio *bio = r10bio->devs[j].bio; if (bio) { for (i = 0; i < RESYNC_PAGES; i++) { - __free_page(bio->bi_io_vec[i].bv_page); + safe_put_page(bio->bi_io_vec[i].bv_page); bio->bi_io_vec[i].bv_page = NULL; } bio_put(bio); @@ -167,7 +170,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) for (i = 0; i < conf->copies; i++) { struct bio **bio = & r10_bio->devs[i].bio; - if (*bio) + if (*bio && *bio != IO_BLOCKED) bio_put(*bio); *bio = NULL; } @@ -175,20 +178,13 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) static inline void free_r10bio(r10bio_t *r10_bio) { - unsigned long flags; - conf_t *conf = mddev_to_conf(r10_bio->mddev); /* * Wake up any possible resync thread that waits for the device * to go idle. 
*/ - spin_lock_irqsave(&conf->resync_lock, flags); - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + allow_barrier(conf); put_all_bios(conf, r10_bio); mempool_free(r10_bio, conf->r10bio_pool); @@ -197,22 +193,10 @@ static inline void free_r10bio(r10bio_t *r10_bio) static inline void put_buf(r10bio_t *r10_bio) { conf_t *conf = mddev_to_conf(r10_bio->mddev); - unsigned long flags; mempool_free(r10_bio, conf->r10buf_pool); - spin_lock_irqsave(&conf->resync_lock, flags); - if (!conf->barrier) - BUG(); - --conf->barrier; - wake_up(&conf->wait_resume); - wake_up(&conf->wait_idle); - - if (!--conf->nr_pending) { - wake_up(&conf->wait_idle); - wake_up(&conf->wait_resume); - } - spin_unlock_irqrestore(&conf->resync_lock, flags); + lower_barrier(conf); } static void reschedule_retry(r10bio_t *r10_bio) @@ -223,6 +207,7 @@ static void reschedule_retry(r10bio_t *r10_bio) spin_lock_irqsave(&conf->device_lock, flags); list_add(&r10_bio->retry_list, &conf->retry_list); + conf->nr_queued ++; spin_unlock_irqrestore(&conf->device_lock, flags); md_wakeup_thread(mddev->thread); @@ -268,9 +253,9 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) - md_error(r10_bio->mddev, conf->mirrors[dev].rdev); - else + update_head_pos(slot, r10_bio); + + if (uptodate) { /* * Set R10BIO_Uptodate in our master bio, so that * we will return a good error code to the higher @@ -281,15 +266,8 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int * wait for the 'master' bio. */ set_bit(R10BIO_Uptodate, &r10_bio->state); - - update_head_pos(slot, r10_bio); - - /* - * we have only one bio on the read side - */ - if (uptodate) raid_end_bio_io(r10_bio); - else { + } else { /* * oops, read error: */ @@ -322,9 +300,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) + if (!uptodate) { md_error(r10_bio->mddev, conf->mirrors[dev].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R10BIO_Degraded, &r10_bio->state); + } else /* * Set R10BIO_Uptodate in our master bio, so that * we will return a good error code to the higher @@ -344,6 +324,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in * already. */ if (atomic_dec_and_test(&r10_bio->remaining)) { + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, + r10_bio->sectors, + !test_bit(R10BIO_Degraded, &r10_bio->state), + 0); md_write_end(r10_bio->mddev); raid_end_bio_io(r10_bio); } @@ -502,8 +487,9 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) rcu_read_lock(); /* * Check if we can balance. We can balance on the whole - * device if no resync is going on, or below the resync window. - * We take the first readable disk when above the resync window. + * device if no resync is going on (recovery is ok), or below + * the resync window. We take the first readable disk when + * above the resync window. */
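The balance rule in the comment just above condenses to a few lines. A user-space sketch with invented names, covering only the two cases the comment mentions (shortest distance below the resync window, first readable disk at or above it); the real read_balance additionally weighs sequential reads, WriteMostly devices and pending counts:

#include <stdio.h>
#include <stdlib.h>

struct demo_mirror {
        long head_position;     /* last known head position, in sectors */
        int  in_sync;           /* 0 if failed or still recovering */
};

static int demo_read_balance(const struct demo_mirror *m, int ndisks,
                             long sector, long next_resync)
{
        int i, best = -1;
        long best_dist = 0;

        if (sector >= next_resync) {
                /* above the resync window: first readable disk wins */
                for (i = 0; i < ndisks; i++)
                        if (m[i].in_sync)
                                return i;
                return -1;
        }
        /* below the window: pick the shortest seek */
        for (i = 0; i < ndisks; i++) {
                long dist;

                if (!m[i].in_sync)
                        continue;
                dist = labs(m[i].head_position - sector);
                if (best < 0 || dist < best_dist) {
                        best = i;
                        best_dist = dist;
                }
        }
        return best;
}

int main(void)
{
        struct demo_mirror m[3] = {
                { 1000, 1 }, { 5000, 1 }, { 0, 0 /* failed */ },
        };

        /* closest in-sync head to sector 4096 is disk 1 */
        printf("read from disk %d\n",
               demo_read_balance(m, 3, 4096, 1L << 20));
        return 0;
}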
if (conf->mddev->recovery_cp < MaxSector && (this_sector + sectors >= conf->next_resync)) { @@ -512,6 +498,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) disk = r10_bio->devs[slot].devnum; while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) { slot++; if (slot == conf->copies) { @@ -529,6 +516,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = 0; disk = r10_bio->devs[slot].devnum; while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + r10_bio->devs[slot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) { slot ++; if (slot == conf->copies) { @@ -549,6 +537,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || + r10_bio->devs[nslot].bio == IO_BLOCKED || !test_bit(In_sync, &rdev->flags)) continue; @@ -607,7 +596,10 @@ static void unplug_slaves(mddev_t *mddev) static void raid10_unplug(request_queue_t *q) { + mddev_t *mddev = q->queuedata; + unplug_slaves(q->queuedata); + md_wakeup_thread(mddev->thread); } static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, @@ -640,27 +632,107 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, return ret; } -/* - * Throttle resync depth, so that we can both get proper overlapping of - * requests, but are still able to handle normal requests quickly. +/* Barriers.... + * Sometimes we need to suspend IO while we do something else, + * either some resync/recovery, or reconfigure the array. + * To do this we raise a 'barrier'. + * The 'barrier' is a counter that can be raised multiple times + * to count how many activities are happening which preclude + * normal IO. + * We can only raise the barrier if there is no pending IO. + * i.e. if nr_pending == 0. + * We choose only to raise the barrier if no-one is waiting for the + * barrier to go down. This means that as soon as an IO request + * is ready, no other operations which require a barrier will start + * until the IO request has had a chance. + * + * So: regular IO calls 'wait_barrier'. When that returns there + * is no background IO happening. It must arrange to call + * allow_barrier when it has finished its IO. + * Background IO calls must call raise_barrier. Once that returns + * there is no normal IO happening. It must arrange to call + * lower_barrier when the particular background IO completes.
*/ #define RESYNC_DEPTH 32 -static void device_barrier(conf_t *conf, sector_t sect) +static void raise_barrier(conf_t *conf, int force) +{ + BUG_ON(force && !conf->barrier); + spin_lock_irq(&conf->resync_lock); + + /* Wait until no block IO is waiting (unless 'force') */ + wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + /* block any new IO from starting */ + conf->barrier++; + + /* Now wait for all pending IO to complete */ + wait_event_lock_irq(conf->wait_barrier, + !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + + spin_unlock_irq(&conf->resync_lock); +} + +static void lower_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->barrier--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void wait_barrier(conf_t *conf) { spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), - conf->resync_lock, unplug_slaves(conf->mddev)); - - if (!conf->barrier++) { - wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, - conf->resync_lock, unplug_slaves(conf->mddev)); - if (conf->nr_pending) - BUG(); + if (conf->barrier) { + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + conf->nr_waiting--; } - wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, - conf->resync_lock, unplug_slaves(conf->mddev)); - conf->next_resync = sect; + conf->nr_pending++; + spin_unlock_irq(&conf->resync_lock); +} + +static void allow_barrier(conf_t *conf) +{ + unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); + conf->nr_pending--; + spin_unlock_irqrestore(&conf->resync_lock, flags); + wake_up(&conf->wait_barrier); +} + +static void freeze_array(conf_t *conf) +{ + /* stop syncio and normal IO and wait for everything to + * go quiet. + * We increment barrier and nr_waiting, and then + * wait until barrier+nr_pending match nr_queued+2 + */ + spin_lock_irq(&conf->resync_lock); + conf->barrier++; + conf->nr_waiting++; + wait_event_lock_irq(conf->wait_barrier, + conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->resync_lock, + raid10_unplug(conf->mddev->queue)); + spin_unlock_irq(&conf->resync_lock); +} + +static void unfreeze_array(conf_t *conf) +{ + /* reverse the effect of the freeze */ + spin_lock_irq(&conf->resync_lock); + conf->barrier--; + conf->nr_waiting--; + wake_up(&conf->wait_barrier); spin_unlock_irq(&conf->resync_lock); } @@ -674,6 +746,8 @@ static int make_request(request_queue_t *q, struct bio * bio) int i; int chunk_sects = conf->chunk_mask + 1; const int rw = bio_data_dir(bio); + struct bio_list bl; + unsigned long flags; if (unlikely(bio_barrier(bio))) { bio_endio(bio, bio->bi_size, -EOPNOTSUPP); @@ -719,10 +793,7 @@ static int make_request(request_queue_t *q, struct bio * bio) * thread has put up a bar for new requests. * Continue immediately if no resync is active currently.
*/ - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, ); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + wait_barrier(conf); disk_stat_inc(mddev->gendisk, ios[rw]); disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); @@ -734,6 +805,7 @@ static int make_request(request_queue_t *q, struct bio * bio) r10_bio->mddev = mddev; r10_bio->sector = bio->bi_sector; + r10_bio->state = 0; if (rw == READ) { /* @@ -778,13 +850,16 @@ static int make_request(request_queue_t *q, struct bio * bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); r10_bio->devs[i].bio = bio; - } else + } else { r10_bio->devs[i].bio = NULL; + set_bit(R10BIO_Degraded, &r10_bio->state); + } } rcu_read_unlock(); - atomic_set(&r10_bio->remaining, 1); + atomic_set(&r10_bio->remaining, 0); + bio_list_init(&bl); for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; @@ -802,13 +877,14 @@ static int make_request(request_queue_t *q, struct bio * bio) mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); - generic_make_request(mbio); + bio_list_add(&bl, mbio); } - if (atomic_dec_and_test(&r10_bio->remaining)) { - md_write_end(mddev); - raid_end_bio_io(r10_bio); - } + bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_merge(&conf->pending_bio_list, &bl); + blk_plug_device(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); return 0; } @@ -897,13 +973,8 @@ static void print_conf(conf_t *conf) static void close_sync(conf_t *conf) { - spin_lock_irq(&conf->resync_lock); - wait_event_lock_irq(conf->wait_resume, !conf->barrier, - conf->resync_lock, unplug_slaves(conf->mddev)); - spin_unlock_irq(&conf->resync_lock); - - if (conf->barrier) BUG(); - if (waitqueue_active(&conf->wait_idle)) BUG(); + wait_barrier(conf); + allow_barrier(conf); mempool_destroy(conf->r10buf_pool); conf->r10buf_pool = NULL; @@ -971,7 +1042,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) if (!enough(conf)) return 0; - for (mirror=0; mirror < mddev->raid_disks; mirror++) + if (rdev->saved_raid_disk >= 0 && + conf->mirrors[rdev->saved_raid_disk].rdev == NULL) + mirror = rdev->saved_raid_disk; + else + mirror = 0; + for ( ; mirror < mddev->raid_disks; mirror++) if ( !(p=conf->mirrors+mirror)->rdev) { blk_queue_stack_limits(mddev->queue, @@ -987,6 +1063,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) p->head_position = 0; rdev->raid_disk = mirror; found = 1; + if (rdev->saved_raid_disk != mirror) + conf->fullsync = 1; rcu_assign_pointer(p->rdev, rdev); break; } @@ -1027,7 +1105,6 @@ abort: static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); conf_t *conf = mddev_to_conf(r10_bio->mddev); int i,d; @@ -1042,9 +1119,16 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) BUG(); update_head_pos(i, r10_bio); d = r10_bio->devs[i].devnum; - if (!uptodate) - md_error(r10_bio->mddev, - conf->mirrors[d].rdev); + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + set_bit(R10BIO_Uptodate, &r10_bio->state); + else { + atomic_add(r10_bio->sectors, + &conf->mirrors[d].rdev->corrected_errors); + if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + md_error(r10_bio->mddev, + conf->mirrors[d].rdev); + } /* for reconstruct, we always reschedule after a read. 
* for resync, only after all reads @@ -1132,23 +1216,32 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) fbio = r10_bio->devs[i].bio; /* now find blocks with errors */ - for (i=first+1 ; i < conf->copies ; i++) { - int vcnt, j, d; + for (i=0 ; i < conf->copies ; i++) { + int j, d; + int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); - if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) - continue; - /* We know that the bi_io_vec layout is the same for - * both 'first' and 'i', so we just compare them. - * All vec entries are PAGE_SIZE; - */ tbio = r10_bio->devs[i].bio; - vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); - for (j = 0; j < vcnt; j++) - if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), - page_address(tbio->bi_io_vec[j].bv_page), - PAGE_SIZE)) - break; - if (j == vcnt) + + if (tbio->bi_end_io != end_sync_read) + continue; + if (i == first) + continue; + if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { + /* We know that the bi_io_vec layout is the same for + * both 'first' and 'i', so we just compare them. + * All vec entries are PAGE_SIZE; + */ + for (j = 0; j < vcnt; j++) + if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), + page_address(tbio->bi_io_vec[j].bv_page), + PAGE_SIZE)) + break; + if (j == vcnt) + continue; + mddev->resync_mismatches += r10_bio->sectors; + } + if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + /* Don't fix anything. */ continue; /* Ok, we need to write this bio * First we need to fixup bv_offset, bv_len and @@ -1227,7 +1320,10 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) atomic_inc(&conf->mirrors[d].rdev->nr_pending); md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); - generic_make_request(wbio); + if (test_bit(R10BIO_Uptodate, &r10_bio->state)) + generic_make_request(wbio); + else + bio_endio(wbio, wbio->bi_size, -EIO); } @@ -1254,10 +1350,31 @@ static void raid10d(mddev_t *mddev) for (;;) { char b[BDEVNAME_SIZE]; spin_lock_irqsave(&conf->device_lock, flags); + + if (conf->pending_bio_list.head) { + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(mddev->queue); + spin_unlock_irqrestore(&conf->device_lock, flags); + /* flush any pending bitmap writes to disk before proceeding w/ I/O */ + if (bitmap_unplug(mddev->bitmap) != 0) + printk("%s: bitmap file write failed!\n", mdname(mddev)); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + unplug = 1; + + continue; + } + if (list_empty(head)) break; r10_bio = list_entry(head->prev, r10bio_t, retry_list); list_del(head->prev); + conf->nr_queued--; spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r10_bio->mddev; @@ -1270,8 +1387,96 @@ static void raid10d(mddev_t *mddev) } else { int mirror; + /* we got a read error. Maybe the drive is bad. Maybe just + * the block and we can fix it. + * We freeze all other IO, and try reading the block from + * other devices. When we find one, we re-write + * and check whether that fixes the read error. + * This is all done synchronously while the array is + * frozen.
+ */ + int sect = 0; /* Offset from r10_bio->sector */ + int sectors = r10_bio->sectors; + freeze_array(conf); + if (mddev->ro == 0) while(sectors) { + int s = sectors; + int sl = r10_bio->read_slot; + int success = 0; + + if (s > (PAGE_SIZE>>9)) + s = PAGE_SIZE >> 9; + + do { + int d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags) && + sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, + conf->tmppage, READ)) + success = 1; + else { + sl++; + if (sl == conf->copies) + sl = 0; + } + } while (!success && sl != r10_bio->read_slot); + + if (success) { + int start = sl; + /* write it back and re-read */ + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + atomic_add(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, WRITE) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + sl = start; + while (sl != r10_bio->read_slot) { + int d; + if (sl==0) + sl = conf->copies; + sl--; + d = r10_bio->devs[sl].devnum; + rdev = conf->mirrors[d].rdev; + if (rdev && + test_bit(In_sync, &rdev->flags)) { + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, + s<<9, conf->tmppage, READ) == 0) + /* Well, this device is dead */ + md_error(mddev, rdev); + } + } + } else { + /* Cannot read from anywhere -- bye bye array */ + md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev); + break; + } + sectors -= s; + sect += s; + } + + unfreeze_array(conf); + bio = r10_bio->devs[r10_bio->read_slot].bio; - r10_bio->devs[r10_bio->read_slot].bio = NULL; + r10_bio->devs[r10_bio->read_slot].bio = + mddev->ro ? IO_BLOCKED : NULL; bio_put(bio); mirror = read_balance(conf, r10_bio); if (mirror == -1) { @@ -1360,6 +1565,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i sector_t max_sector, nr_sectors; int disk; int i; + int max_sync; + int sync_blocks; sector_t sectors_skipped = 0; int chunks_skipped = 0; @@ -1373,6 +1580,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) max_sector = mddev->resync_max_sectors; if (sector_nr >= max_sector) { + /* If we aborted, we need to abort the + * sync on the 'current' bitmap chunks (there can + * be several when recovering multiple devices), + * as we may have started syncing it but not finished. + * We can find the current address in + * mddev->curr_resync, but for recovery, + * we need to convert that to several + * virtual addresses. + */ + if (mddev->curr_resync < max_sector) { /* aborted */ + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + bitmap_end_sync(mddev->bitmap, mddev->curr_resync, + &sync_blocks, 1); + else for (i=0; i<conf->raid_disks; i++) { + sector_t sect = + raid10_find_virt(conf, mddev->curr_resync, i); + bitmap_end_sync(mddev->bitmap, sect, + &sync_blocks, 1); + } + } else /* completed sync */ + conf->fullsync = 0; + + bitmap_close_sync(mddev->bitmap); close_sync(conf); *skipped = 1; return sectors_skipped; @@ -1395,9 +1625,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i * If there is non-resync activity waiting for us then * put in a delay to throttle resync.
*/ - if (!go_faster && waitqueue_active(&conf->wait_resume)) + if (!go_faster && conf->nr_waiting) msleep_interruptible(1000); - device_barrier(conf, sector_nr + RESYNC_SECTORS); /* Again, very different code for resync and recovery. * Both must result in an r10bio with a list of bios that @@ -1414,6 +1643,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i * end_sync_write if we will want to write. */ + max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* recovery... the complicated one */ int i, j, k; @@ -1422,14 +1652,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; i<conf->raid_disks; i++) if (conf->mirrors[i].rdev && !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { + int still_degraded = 0; /* want to reconstruct this device */ r10bio_t *rb2 = r10_bio; + sector_t sect = raid10_find_virt(conf, sector_nr, i); + int must_sync; + /* Unless we are doing a full sync, we only need + * to recover the block if it is set in the bitmap + */ + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, 1); + if (sync_blocks < max_sync) + max_sync = sync_blocks; + if (!must_sync && + !conf->fullsync) { + /* yep, skip the sync_blocks here, but don't assume + * that there will never be anything to do here + */ + chunks_skipped = -1; + continue; + } r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); - spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; - if (rb2) conf->barrier++; - spin_unlock_irq(&conf->resync_lock); + raise_barrier(conf, rb2 != NULL); atomic_set(&r10_bio->remaining, 0); r10_bio->master_bio = (struct bio*)rb2; @@ -1437,8 +1682,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i atomic_inc(&rb2->remaining); r10_bio->mddev = mddev; set_bit(R10BIO_IsRecover, &r10_bio->state); - r10_bio->sector = raid10_find_virt(conf, sector_nr, i); + r10_bio->sector = sect; + raid10_find_phys(conf, r10_bio); + /* Need to check if this section will still be + * degraded + */ + for (j=0; j<conf->copies;j++) { + int d = r10_bio->devs[j].devnum; + if (conf->mirrors[d].rdev == NULL || + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) { + still_degraded = 1; + break; + } + } + must_sync = bitmap_start_sync(mddev->bitmap, sect, + &sync_blocks, still_degraded); + for (j=0; j<conf->copies;j++) { int d = r10_bio->devs[j].devnum; if (conf->mirrors[d].rdev && @@ -1498,14 +1758,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i } else { /* resync. 
Schedule a read for every block at this virt offset */ int count = 0; - r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); - spin_lock_irq(&conf->resync_lock); - conf->nr_pending++; - spin_unlock_irq(&conf->resync_lock); + if (!bitmap_start_sync(mddev->bitmap, sector_nr, + &sync_blocks, mddev->degraded) && + !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + /* We can skip this block */ + *skipped = 1; + return sync_blocks + sectors_skipped; + } + if (sync_blocks < max_sync) + max_sync = sync_blocks; + r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); r10_bio->mddev = mddev; atomic_set(&r10_bio->remaining, 0); + raise_barrier(conf, 0); + conf->next_resync = sector_nr; r10_bio->master_bio = NULL; r10_bio->sector = sector_nr; @@ -1558,6 +1826,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i } nr_sectors = 0; + if (sector_nr + max_sync < max_sector) + max_sector = sector_nr + max_sync; do { struct page *page; int len = PAGE_SIZE; @@ -1632,11 +1902,11 @@ static int run(mddev_t *mddev) int nc, fc; sector_t stride, size; - if (mddev->level != 10) { - printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n", - mdname(mddev), mddev->level); - goto out; + if (mddev->chunk_size == 0) { + printk(KERN_ERR "md/raid10: non-zero chunk size required.\n"); + return -EINVAL; } + nc = mddev->layout & 255; fc = (mddev->layout >> 8) & 255; if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || @@ -1650,22 +1920,24 @@ static int run(mddev_t *mddev) * bookkeeping area. [whatever we allocate in run(), * should be freed in stop()] */ - conf = kmalloc(sizeof(conf_t), GFP_KERNEL); + conf = kzalloc(sizeof(conf_t), GFP_KERNEL); mddev->private = conf; if (!conf) { printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", mdname(mddev)); goto out; } - memset(conf, 0, sizeof(*conf)); - conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks, + conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->mirrors) { printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", mdname(mddev)); goto out_free_conf; } - memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); + + conf->tmppage = alloc_page(GFP_KERNEL); + if (!conf->tmppage) + goto out_free_conf; conf->near_copies = nc; conf->far_copies = fc; @@ -1713,8 +1985,7 @@ static int run(mddev_t *mddev) INIT_LIST_HEAD(&conf->retry_list); spin_lock_init(&conf->resync_lock); - init_waitqueue_head(&conf->wait_idle); - init_waitqueue_head(&conf->wait_resume); + init_waitqueue_head(&conf->wait_barrier); /* need to check that every block has at least one working mirror */ if (!enough(conf)) { @@ -1763,7 +2034,7 @@ static int run(mddev_t *mddev) * maybe... 
*/ { - int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; + int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE; stripe /= conf->near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; @@ -1776,6 +2047,7 @@ static int run(mddev_t *mddev) out_free_conf: if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); + safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); mddev->private = NULL; @@ -1798,10 +2070,31 @@ static int stop(mddev_t *mddev) return 0; } +static void raid10_quiesce(mddev_t *mddev, int state) +{ + conf_t *conf = mddev_to_conf(mddev); + + switch(state) { + case 1: + raise_barrier(conf, 0); + break; + case 0: + lower_barrier(conf); + break; + } + if (mddev->thread) { + if (mddev->bitmap) + mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; + else + mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + md_wakeup_thread(mddev->thread); + } +} -static mdk_personality_t raid10_personality = +static struct mdk_personality raid10_personality = { .name = "raid10", + .level = 10, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -1812,19 +2105,22 @@ static mdk_personality_t raid10_personality = .hot_remove_disk= raid10_remove_disk, .spare_active = raid10_spare_active, .sync_request = sync_request, + .quiesce = raid10_quiesce, }; static int __init raid_init(void) { - return register_md_personality(RAID10, &raid10_personality); + return register_md_personality(&raid10_personality); } static void raid_exit(void) { - unregister_md_personality(RAID10); + unregister_md_personality(&raid10_personality); } module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-9"); /* RAID10 */ +MODULE_ALIAS("md-raid10"); +MODULE_ALIAS("md-level-10"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index fafc4bc..54f4a98 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -35,12 +35,10 @@ #define STRIPE_SHIFT (PAGE_SHIFT - 9) #define STRIPE_SECTORS (STRIPE_SIZE>>9) #define IO_THRESHOLD 1 -#define HASH_PAGES 1 -#define HASH_PAGES_ORDER 0 -#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *)) +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) #define HASH_MASK (NR_HASH - 1) -#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) +#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and
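The raid5 hunks below replace the open-coded stripe hash chains (hash_next/hash_pprev) with the kernel's hlist primitives. Two properties make hlist a good fit here: a bucket head is a single pointer, half the size of a list_head, so the page-sized table in the NR_HASH definition above holds twice as many buckets; and the pprev back-pointer lets a node unlink itself without knowing which bucket it hangs on. A self-contained user-space re-creation of the idiom, with demo_ names invented for the example:

#include <stddef.h>
#include <stdio.h>

struct demo_hlist_node {
        struct demo_hlist_node *next, **pprev;
};
struct demo_hlist_head {
        struct demo_hlist_node *first;
};

static void demo_hlist_add_head(struct demo_hlist_node *n,
                                struct demo_hlist_head *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static void demo_hlist_del(struct demo_hlist_node *n)
{
        *n->pprev = n->next;            /* works at the head or mid-chain */
        if (n->next)
                n->next->pprev = n->pprev;
}

struct demo_stripe {
        long sector;
        struct demo_hlist_node hash;
};

int main(void)
{
        struct demo_hlist_head bucket = { NULL };
        struct demo_stripe a = { 8, { NULL, NULL } };
        struct demo_stripe b = { 16, { NULL, NULL } };
        struct demo_hlist_node *pos;

        demo_hlist_add_head(&a.hash, &bucket);
        demo_hlist_add_head(&b.hash, &bucket);
        demo_hlist_del(&a.hash);        /* unlink without touching the head */

        for (pos = bucket.first; pos; pos = pos->next) {
                struct demo_stripe *s = (struct demo_stripe *)
                        ((char *)pos - offsetof(struct demo_stripe, hash));
                printf("stripe at sector %ld\n", s->sector);
        }
        return 0;
}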
There may be several bio's per stripe+device, and @@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh) spin_unlock_irqrestore(&conf->device_lock, flags); } -static void remove_hash(struct stripe_head *sh) +static inline void remove_hash(struct stripe_head *sh) { PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); - if (sh->hash_pprev) { - if (sh->hash_next) - sh->hash_next->hash_pprev = sh->hash_pprev; - *sh->hash_pprev = sh->hash_next; - sh->hash_pprev = NULL; - } + hlist_del_init(&sh->hash); } -static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) +static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) { - struct stripe_head **shp = &stripe_hash(conf, sh->sector); + struct hlist_head *hp = stripe_hash(conf, sh->sector); PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); CHECK_DEVLOCK(); - if ((sh->hash_next = *shp) != NULL) - (*shp)->hash_pprev = &sh->hash_next; - *shp = sh; - sh->hash_pprev = shp; + hlist_add_head(&sh->hash, hp); } @@ -167,7 +157,7 @@ static void shrink_buffers(struct stripe_head *sh, int num) if (!p) continue; sh->dev[i].page = NULL; - page_cache_release(p); + put_page(p); } } @@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) { struct stripe_head *sh; + struct hlist_node *hn; CHECK_DEVLOCK(); PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); - for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) + hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) if (sh->sector == sector) return sh; PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); @@ -417,7 +408,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, set_bit(R5_UPTODATE, &sh->dev[i].flags); #endif if (test_bit(R5_ReadError, &sh->dev[i].flags)) { - printk("R5: read error corrected!!\n"); + printk(KERN_INFO "raid5: read error corrected!!\n"); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } @@ -428,13 +419,14 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&conf->disks[i].rdev->read_errors); if (conf->mddev->degraded) - printk("R5: read error not correctable.\n"); + printk(KERN_WARNING "raid5: read error not correctable.\n"); else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! 
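The raid5 conversion above swaps the open-coded hash_next/hash_pprev chain for the kernel's generic hlist primitives. This standalone re-implementation of the three operations the patch uses shows what pprev buys: a node can unlink itself without knowing which bucket head it hangs off. Simplified from the kernel's list.h; no poison values, no RCU variants.

    /*
     * Minimal intrusive hlist, mirroring the primitives the conversion
     * switches to.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
        n->next = h->first;
        if (h->first)
            h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    /* pprev points at whichever pointer currently points at us, so a
     * node can unlink itself without a bucket reference, which is
     * exactly what remove_hash() relies on */
    static void hlist_del_init(struct hlist_node *n)
    {
        if (n->pprev) {
            *n->pprev = n->next;
            if (n->next)
                n->next->pprev = n->pprev;
            n->next = NULL;
            n->pprev = NULL;
        }
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct stripe { unsigned long sector; struct hlist_node hash; };

    int main(void)
    {
        struct hlist_head bucket = { NULL };
        struct stripe a = { 8 }, b = { 16 };
        struct hlist_node *n;

        hlist_add_head(&a.hash, &bucket);
        hlist_add_head(&b.hash, &bucket);
        hlist_del_init(&a.hash);            /* needs no bucket pointer */

        for (n = bucket.first; n; n = n->next)
            printf("sector %lu\n",
                   container_of(n, struct stripe, hash)->sector);
        return 0;
    }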
*/ - printk("R5: read error NOT corrected!!\n"); + printk(KERN_WARNING "raid5: read error NOT corrected!!\n"); else if (atomic_read(&conf->disks[i].rdev->read_errors) > conf->max_nr_stripes) - printk("raid5: Too many read errors, failing device.\n"); + printk(KERN_WARNING + "raid5: Too many read errors, failing device.\n"); else retry = 1; if (retry) @@ -604,7 +596,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; break; default: - printk("raid5: unsupported algorithm %d\n", + printk(KERN_ERR "raid5: unsupported algorithm %d\n", conf->algorithm); } @@ -645,7 +637,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) i -= (sh->pd_idx + 1); break; default: - printk("raid5: unsupported algorithm %d\n", + printk(KERN_ERR "raid5: unsupported algorithm %d\n", conf->algorithm); } @@ -654,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { - printk("compute_blocknr: map not correct\n"); + printk(KERN_ERR "compute_blocknr: map not correct\n"); return 0; } return r_sector; @@ -737,7 +729,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx) if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) ptr[count++] = p; else - printk("compute_block() %d, stripe %llu, %d" + printk(KERN_ERR "compute_block() %d, stripe %llu, %d" " not present\n", dd_idx, (unsigned long long)sh->sector, i); @@ -960,11 +952,11 @@ static void handle_stripe(struct stripe_head *sh) syncing = test_bit(STRIPE_SYNCING, &sh->state); /* Now to look around and see what can be done */ + rcu_read_lock(); for (i=disks; i--; ) { mdk_rdev_t *rdev; dev = &sh->dev[i]; clear_bit(R5_Insync, &dev->flags); - clear_bit(R5_Syncio, &dev->flags); PRINTK("check %d: state 0x%lx read %p write %p written %p\n", i, dev->flags, dev->toread, dev->towrite, dev->written); @@ -1003,9 +995,9 @@ static void handle_stripe(struct stripe_head *sh) non_overwrite++; } if (dev->written) written++; - rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ + rdev = rcu_dereference(conf->disks[i].rdev); if (!rdev || !test_bit(In_sync, &rdev->flags)) { - /* The ReadError flag wil just be confusing now */ + /* The ReadError flag will just be confusing now */ clear_bit(R5_ReadError, &dev->flags); clear_bit(R5_ReWrite, &dev->flags); } @@ -1016,6 +1008,7 @@ static void handle_stripe(struct stripe_head *sh) } else set_bit(R5_Insync, &dev->flags); } + rcu_read_unlock(); PRINTK("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d\n", locked, uptodate, to_read, to_write, failed, failed_num); @@ -1027,10 +1020,13 @@ static void handle_stripe(struct stripe_head *sh) int bitmap_end = 0; if (test_bit(R5_ReadError, &sh->dev[i].flags)) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && test_bit(In_sync, &rdev->flags)) /* multiple read failures in one stripe */ md_error(conf->mddev, rdev); + rcu_read_unlock(); } spin_lock_irq(&conf->device_lock); @@ -1179,9 +1175,6 @@ static void handle_stripe(struct stripe_head *sh) locked++; PRINTK("Reading block %d (sync=%d)\n", i, syncing); - if (syncing) - md_sync_acct(conf->disks[i].rdev->bdev, - STRIPE_SECTORS); } } } @@ -1288,7 +1281,7 @@ static void handle_stripe(struct stripe_head *sh) * is available */ if (syncing && locked == 0 && - 
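The printk() cleanups above and below add explicit severity prefixes (KERN_INFO, KERN_WARNING, KERN_ERR) and unify the driver prefix as "raid5:". The severity markers are nothing more than short string literals concatenated with the format at compile time, as this userspace sketch shows; the <N> values match linux/kernel.h of this era.

    /*
     * Severity prefixes are plain string literals pasted onto the
     * format at compile time.
     */
    #include <stdio.h>

    #define KERN_ERR     "<3>"
    #define KERN_WARNING "<4>"
    #define KERN_INFO    "<6>"

    #define printk printf   /* stand-in so the sketch runs in userspace */

    int main(void)
    {
        /* compiles to the single literal
         * "<4>raid5: read error NOT corrected!!\n"; klogd strips the
         * <N> tag and routes the line by severity */
        printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
        printk(KERN_INFO "raid5: read error corrected!!\n");
        return 0;
    }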
!test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) { + !test_bit(STRIPE_INSYNC, &sh->state)) { set_bit(STRIPE_HANDLE, &sh->state); if (failed == 0) { char *pagea; @@ -1306,27 +1299,25 @@ static void handle_stripe(struct stripe_head *sh) if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) /* don't try to repair!! */ set_bit(STRIPE_INSYNC, &sh->state); + else { + compute_block(sh, sh->pd_idx); + uptodate++; + } } } if (!test_bit(STRIPE_INSYNC, &sh->state)) { + /* either failed parity check, or recovery is happening */ if (failed==0) failed_num = sh->pd_idx; - /* should be able to compute the missing block and write it to spare */ - if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) { - if (uptodate+1 != disks) - BUG(); - compute_block(sh, failed_num); - uptodate++; - } - if (uptodate != disks) - BUG(); dev = &sh->dev[failed_num]; + BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); + BUG_ON(uptodate != disks); + set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); clear_bit(STRIPE_DEGRADED, &sh->state); locked++; set_bit(STRIPE_INSYNC, &sh->state); - set_bit(R5_Syncio, &dev->flags); } } if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { @@ -1392,7 +1383,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_unlock(); if (rdev) { - if (test_bit(R5_Syncio, &sh->dev[i].flags)) + if (syncing) md_sync_acct(rdev->bdev, STRIPE_SECTORS); bi->bi_bdev = rdev->bdev; @@ -1409,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh) bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; bi->bi_next = NULL; + if (rw == WRITE && + test_bit(R5_ReWrite, &sh->dev[i].flags)) + atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); generic_make_request(bi); } else { if (rw == 1) @@ -1822,21 +1816,21 @@ static int run(mddev_t *mddev) struct list_head *tmp; if (mddev->level != 5 && mddev->level != 4) { - printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level); + printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n", + mdname(mddev), mddev->level); return -EIO; } - mddev->private = kmalloc (sizeof (raid5_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + mddev->private = kzalloc(sizeof (raid5_conf_t) + + mddev->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; - memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); + conf->mddev = mddev; - if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) + if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; - memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE); spin_lock_init(&conf->device_lock); init_waitqueue_head(&conf->wait_for_stripe); @@ -1903,10 +1897,17 @@ static int run(mddev_t *mddev) if (mddev->degraded == 1 && mddev->recovery_cp != MaxSector) { - printk(KERN_ERR - "raid5: cannot start dirty degraded array for %s\n", - mdname(mddev)); - goto abort; + if (mddev->ok_start_degraded) + printk(KERN_WARNING + "raid5: starting dirty degraded array: %s" + "- data corruption possible.\n", + mdname(mddev)); + else { + printk(KERN_ERR + "raid5: cannot start dirty degraded array for %s\n", + mdname(mddev)); + goto abort; + } } { @@ -1948,7 +1949,7 @@ static int run(mddev_t *mddev) */ { int stripe = (mddev->raid_disks-1) * mddev->chunk_size - / PAGE_CACHE_SIZE; + / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } @@ -1956,9 
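The handle_stripe() hunks above stop dereferencing conf->disks[i].rdev directly and instead bracket the access with rcu_read_lock()/rcu_read_unlock(), fetching the pointer through rcu_dereference(), so a device can be hot-removed while readers are active. A loose userspace analogue using C11 atomics; it models only the publish/read ordering, not the grace-period machinery that makes the eventual free safe.

    /*
     * Userspace analogue of the rcu_dereference() read side.  Real RCU
     * additionally defers freeing until all readers are done, which
     * this sketch does not attempt.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct rdev { int in_sync; };

    static _Atomic(struct rdev *) disk;    /* like conf->disks[i].rdev */

    static void reader(void)
    {
        /* analogue of: rcu_read_lock(); rdev = rcu_dereference(...); */
        struct rdev *r = atomic_load_explicit(&disk, memory_order_acquire);

        if (r && r->in_sync)
            puts("device usable");
        else
            puts("device missing or out of sync");
        /* analogue of rcu_read_unlock(): r must not be used past here */
    }

    int main(void)
    {
        static struct rdev d = { 1 };

        atomic_store_explicit(&disk, &d, memory_order_release);   /* publish */
        reader();
        atomic_store_explicit(&disk, NULL, memory_order_release); /* remove */
        reader();
        return 0;
    }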
+1957,6 @@ static int run(mddev_t *mddev) /* Ok, everything is just fine now */ sysfs_create_group(&mddev->kobj, &raid5_attrs_group); - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - mddev->queue->unplug_fn = raid5_unplug_device; mddev->queue->issue_flush_fn = raid5_issue_flush; @@ -1967,9 +1965,7 @@ static int run(mddev_t *mddev) abort: if (conf) { print_raid5_conf(conf); - if (conf->stripe_hashtbl) - free_pages((unsigned long) conf->stripe_hashtbl, - HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); kfree(conf); } mddev->private = NULL; @@ -1986,7 +1982,7 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; shrink_stripes(conf); - free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); kfree(conf); @@ -2014,12 +2010,12 @@ static void print_sh (struct stripe_head *sh) static void printall (raid5_conf_t *conf) { struct stripe_head *sh; + struct hlist_node *hn; int i; spin_lock_irq(&conf->device_lock); for (i = 0; i < NR_HASH; i++) { - sh = conf->stripe_hashtbl[i]; - for (; sh; sh = sh->hash_next) { + hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { if (sh->raid_conf != conf) continue; print_sh(sh); @@ -2192,17 +2188,12 @@ static void raid5_quiesce(mddev_t *mddev, int state) spin_unlock_irq(&conf->device_lock); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } -static mdk_personality_t raid5_personality= + +static struct mdk_personality raid5_personality = { .name = "raid5", + .level = 5, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2217,17 +2208,42 @@ static mdk_personality_t raid5_personality= .quiesce = raid5_quiesce, }; -static int __init raid5_init (void) +static struct mdk_personality raid4_personality = { - return register_md_personality (RAID5, &raid5_personality); + .name = "raid4", + .level = 4, + .owner = THIS_MODULE, + .make_request = make_request, + .run = run, + .stop = stop, + .status = status, + .error_handler = error, + .hot_add_disk = raid5_add_disk, + .hot_remove_disk= raid5_remove_disk, + .spare_active = raid5_spare_active, + .sync_request = sync_request, + .resize = raid5_resize, + .quiesce = raid5_quiesce, +}; + +static int __init raid5_init(void) +{ + register_md_personality(&raid5_personality); + register_md_personality(&raid4_personality); + return 0; } -static void raid5_exit (void) +static void raid5_exit(void) { - unregister_md_personality (RAID5); + unregister_md_personality(&raid5_personality); + unregister_md_personality(&raid4_personality); } module_init(raid5_init); module_exit(raid5_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-4"); /* RAID5 */ +MODULE_ALIAS("md-raid5"); +MODULE_ALIAS("md-raid4"); +MODULE_ALIAS("md-level-5"); +MODULE_ALIAS("md-level-4"); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 0000d16..8c823d6 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -40,12 +40,10 @@ #define STRIPE_SHIFT (PAGE_SHIFT - 9) #define STRIPE_SECTORS (STRIPE_SIZE>>9) #define IO_THRESHOLD 1 -#define HASH_PAGES 1 -#define HASH_PAGES_ORDER 0 -#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *)) +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) #define HASH_MASK 
(NR_HASH - 1) -#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) +#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and @@ -132,29 +130,21 @@ static void release_stripe(struct stripe_head *sh) spin_unlock_irqrestore(&conf->device_lock, flags); } -static void remove_hash(struct stripe_head *sh) +static inline void remove_hash(struct stripe_head *sh) { PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); - if (sh->hash_pprev) { - if (sh->hash_next) - sh->hash_next->hash_pprev = sh->hash_pprev; - *sh->hash_pprev = sh->hash_next; - sh->hash_pprev = NULL; - } + hlist_del_init(&sh->hash); } -static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) +static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) { - struct stripe_head **shp = &stripe_hash(conf, sh->sector); + struct hlist_head *hp = stripe_hash(conf, sh->sector); PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); CHECK_DEVLOCK(); - if ((sh->hash_next = *shp) != NULL) - (*shp)->hash_pprev = &sh->hash_next; - *shp = sh; - sh->hash_pprev = shp; + hlist_add_head(&sh->hash, hp); } @@ -186,7 +176,7 @@ static void shrink_buffers(struct stripe_head *sh, int num) if (!p) continue; sh->dev[i].page = NULL; - page_cache_release(p); + put_page(p); } } @@ -247,10 +237,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector) { struct stripe_head *sh; + struct hlist_node *hn; CHECK_DEVLOCK(); PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); - for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) + hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash) if (sh->sector == sector) return sh; PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); @@ -367,8 +358,8 @@ static void shrink_stripes(raid6_conf_t *conf) conf->slab_cache = NULL; } -static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, - int error) +static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done, + int error) { struct stripe_head *sh = bi->bi_private; raid6_conf_t *conf = sh->raid_conf; @@ -420,9 +411,35 @@ static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, #else set_bit(R5_UPTODATE, &sh->dev[i].flags); #endif + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + printk(KERN_INFO "raid6: read error corrected!!\n"); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } + if (atomic_read(&conf->disks[i].rdev->read_errors)) + atomic_set(&conf->disks[i].rdev->read_errors, 0); } else { - md_error(conf->mddev, conf->disks[i].rdev); + int retry = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); + atomic_inc(&conf->disks[i].rdev->read_errors); + if (conf->mddev->degraded) + printk(KERN_WARNING "raid6: read error not correctable.\n"); + else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) + /* Oh, no!!! 
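The NR_HASH/HASH_MASK/stripe_hash() macros completed above size the stripe hash table to exactly one page of hlist_head pointers, which makes NR_HASH a power of two and lets the cheap '& HASH_MASK' stand in for a modulo. A standalone sketch of the mapping, assuming 4K pages (so STRIPE_SHIFT, PAGE_SHIFT - 9, is 3):

    /*
     * The stripe-hash mapping, assuming 4K pages.  NR_HASH is a power
     * of two, so '& HASH_MASK' is an exact stand-in for '% NR_HASH'.
     */
    #include <stdio.h>

    #define PAGE_SIZE    4096
    #define STRIPE_SHIFT 3      /* PAGE_SHIFT (12) - 9 */
    #define NR_HASH      (PAGE_SIZE / sizeof(void *))
    #define HASH_MASK    (NR_HASH - 1)

    static size_t stripe_hash_idx(unsigned long long sect)
    {
        return (sect >> STRIPE_SHIFT) & HASH_MASK;
    }

    int main(void)
    {
        unsigned long long s;

        /* consecutive 4K stripes are 8 sectors apart and fall into
         * consecutive buckets, wrapping at NR_HASH */
        for (s = 0; s < 4 * 8; s += 8)
            printf("sector %3llu -> bucket %zu\n", s, stripe_hash_idx(s));
        printf("NR_HASH=%zu HASH_MASK=%#zx\n",
               (size_t)NR_HASH, (size_t)HASH_MASK);
        return 0;
    }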
*/ + printk(KERN_WARNING "raid6: read error NOT corrected!!\n"); + else if (atomic_read(&conf->disks[i].rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "raid6: Too many read errors, failing device.\n"); + else + retry = 1; + if (retry) + set_bit(R5_ReadError, &sh->dev[i].flags); + else { + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + md_error(conf->mddev, conf->disks[i].rdev); + } } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); #if 0 @@ -805,7 +822,7 @@ static void compute_parity(struct stripe_head *sh, int method) } /* Compute one missing block */ -static void compute_block_1(struct stripe_head *sh, int dd_idx) +static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) { raid6_conf_t *conf = sh->raid_conf; int i, count, disks = conf->raid_disks; @@ -821,7 +838,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx) compute_parity(sh, UPDATE_PARITY); } else { ptr[0] = page_address(sh->dev[dd_idx].page); - memset(ptr[0], 0, STRIPE_SIZE); + if (!nozero) memset(ptr[0], 0, STRIPE_SIZE); count = 1; for (i = disks ; i--; ) { if (i == dd_idx || i == qd_idx) @@ -838,7 +855,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx) } if (count != 1) xor_block(count, STRIPE_SIZE, ptr); - set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); + else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); } } @@ -871,7 +889,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) return; } else { /* We're missing D+Q; recompute D from P */ - compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1); + compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */ return; } @@ -982,6 +1000,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in } +static int page_is_zero(struct page *p) +{ + char *a = page_address(p); + return ((*(u32*)a) == 0 && + memcmp(a, a+4, STRIPE_SIZE-4)==0); +} /* * handle_stripe - do things to a stripe. 
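page_is_zero(), added above, tests a whole page for zero with one 4-byte compare plus one overlapping memcmp(): if the first four bytes are zero and every byte equals the byte four positions earlier, induction gives that the entire buffer is zero. A standalone version; buf_is_zero is an invented name, and memcpy replaces the kernel's direct u32 load to stay portable.

    /*
     * With head == 0 and memcmp(a, a+4, len-4) == 0, each byte equals
     * the byte four before it, so the whole buffer is zero by
     * induction.  Requires len > 4.
     */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static int buf_is_zero(const void *p, size_t len)
    {
        const char *a = p;
        uint32_t head;

        memcpy(&head, a, 4);
        return head == 0 && memcmp(a, a + 4, len - 4) == 0;
    }

    int main(void)
    {
        char page[4096] = { 0 };

        printf("all zero : %d\n", buf_is_zero(page, sizeof(page)));
        page[4095] = 1;
        printf("one byte : %d\n", buf_is_zero(page, sizeof(page)));
        return 0;
    }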
* @@ -1000,7 +1024,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in * */ -static void handle_stripe(struct stripe_head *sh) +static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) { raid6_conf_t *conf = sh->raid_conf; int disks = conf->raid_disks; @@ -1027,11 +1051,11 @@ static void handle_stripe(struct stripe_head *sh) syncing = test_bit(STRIPE_SYNCING, &sh->state); /* Now to look around and see what can be done */ + rcu_read_lock(); for (i=disks; i--; ) { mdk_rdev_t *rdev; dev = &sh->dev[i]; clear_bit(R5_Insync, &dev->flags); - clear_bit(R5_Syncio, &dev->flags); PRINTK("check %d: state 0x%lx read %p write %p written %p\n", i, dev->flags, dev->toread, dev->towrite, dev->written); @@ -1070,14 +1094,21 @@ static void handle_stripe(struct stripe_head *sh) non_overwrite++; } if (dev->written) written++; - rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ + rdev = rcu_dereference(conf->disks[i].rdev); if (!rdev || !test_bit(In_sync, &rdev->flags)) { + /* The ReadError flag will just be confusing now */ + clear_bit(R5_ReadError, &dev->flags); + clear_bit(R5_ReWrite, &dev->flags); + } + if (!rdev || !test_bit(In_sync, &rdev->flags) + || test_bit(R5_ReadError, &dev->flags)) { if ( failed < 2 ) failed_num[failed] = i; failed++; } else set_bit(R5_Insync, &dev->flags); } + rcu_read_unlock(); PRINTK("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d,%d\n", locked, uptodate, to_read, to_write, failed, @@ -1088,6 +1119,17 @@ static void handle_stripe(struct stripe_head *sh) if (failed > 2 && to_read+to_write+written) { for (i=disks; i--; ) { int bitmap_end = 0; + + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + mdk_rdev_t *rdev; + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(In_sync, &rdev->flags)) + /* multiple read failures in one stripe */ + md_error(conf->mddev, rdev); + rcu_read_unlock(); + } + spin_lock_irq(&conf->device_lock); /* fail all writes first */ bi = sh->dev[i].towrite; @@ -1123,7 +1165,8 @@ static void handle_stripe(struct stripe_head *sh) } /* fail any reads if this device is non-operational */ - if (!test_bit(R5_Insync, &sh->dev[i].flags)) { + if (!test_bit(R5_Insync, &sh->dev[i].flags) || + test_bit(R5_ReadError, &sh->dev[i].flags)) { bi = sh->dev[i].toread; sh->dev[i].toread = NULL; if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) @@ -1228,7 +1271,7 @@ static void handle_stripe(struct stripe_head *sh) if (uptodate == disks-1) { PRINTK("Computing stripe %llu block %d\n", (unsigned long long)sh->sector, i); - compute_block_1(sh, i); + compute_block_1(sh, i, 0); uptodate++; } else if ( uptodate == disks-2 && failed >= 2 ) { /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ @@ -1259,9 +1302,6 @@ static void handle_stripe(struct stripe_head *sh) locked++; PRINTK("Reading block %d (sync=%d)\n", i, syncing); - if (syncing) - md_sync_acct(conf->disks[i].rdev->bdev, - STRIPE_SECTORS); } } } @@ -1323,7 +1363,7 @@ static void handle_stripe(struct stripe_head *sh) /* We have failed blocks and need to compute them */ switch ( failed ) { case 0: BUG(); - case 1: compute_block_1(sh, failed_num[0]); break; + case 1: compute_block_1(sh, failed_num[0], 0); break; case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; default: BUG(); /* This request should have been failed? 
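The raid6 end-read-request hunks above mirror the raid5 change: instead of failing a device on the first bad read, the error is counted and possibly retried. The decision tree, modelled as a pure function with invented names:

    /*
     * The retry policy on a failed read, as a pure function.
     */
    #include <stdio.h>

    enum verdict { RETRY, FAIL_DEVICE };

    static enum verdict on_read_error(int array_degraded,
                                      int already_rewritten,
                                      int read_errors, int max_nr_stripes)
    {
        if (array_degraded)
            return FAIL_DEVICE;      /* "read error not correctable" */
        if (already_rewritten)
            return FAIL_DEVICE;      /* the rewrite didn't stick */
        if (read_errors > max_nr_stripes)
            return FAIL_DEVICE;      /* "Too many read errors" */
        return RETRY;                /* set R5_ReadError, recompute data */
    }

    int main(void)
    {
        printf("healthy array, first error: %s\n",
               on_read_error(0, 0, 1, 256) == RETRY ? "retry" : "fail");
        printf("degraded array            : %s\n",
               on_read_error(1, 0, 1, 256) == RETRY ? "retry" : "fail");
        return 0;
    }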
*/ } @@ -1338,12 +1378,10 @@ static void handle_stripe(struct stripe_head *sh) (unsigned long long)sh->sector, i); locked++; set_bit(R5_Wantwrite, &sh->dev[i].flags); -#if 0 /**** FIX: I don't understand the logic here... ****/ - if (!test_bit(R5_Insync, &sh->dev[i].flags) - || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? */ - set_bit(STRIPE_INSYNC, &sh->state); -#endif } + /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ + set_bit(STRIPE_INSYNC, &sh->state); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { atomic_dec(&conf->preread_active_stripes); if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) @@ -1356,84 +1394,119 @@ static void handle_stripe(struct stripe_head *sh) * Any reads will already have been scheduled, so we just see if enough data * is available */ - if (syncing && locked == 0 && - !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) { - set_bit(STRIPE_HANDLE, &sh->state); -#if 0 /* RAID-6: Don't support CHECK PARITY yet */ - if (failed == 0) { - char *pagea; - if (uptodate != disks) - BUG(); - compute_parity(sh, CHECK_PARITY); - uptodate--; - pagea = page_address(sh->dev[pd_idx].page); - if ((*(u32*)pagea) == 0 && - !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { - /* parity is correct (on disc, not in buffer any more) */ - set_bit(STRIPE_INSYNC, &sh->state); - } - } -#endif - if (!test_bit(STRIPE_INSYNC, &sh->state)) { - int failed_needupdate[2]; - struct r5dev *adev, *bdev; - - if ( failed < 1 ) - failed_num[0] = pd_idx; - if ( failed < 2 ) - failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx; + if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { + int update_p = 0, update_q = 0; + struct r5dev *dev; - failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags); - failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags); + set_bit(STRIPE_HANDLE, &sh->state); - PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n", - failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]); + BUG_ON(failed>2); + BUG_ON(uptodate < disks); + /* Want to check and possibly repair P and Q. + * However there could be one 'failed' device, in which + * case we can only check one of them, possibly using the + * other to generate missing data + */ -#if 0 /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */ - /* should be able to compute the missing block(s) and write to spare */ - if ( failed_needupdate[0] ^ failed_needupdate[1] ) { - if (uptodate+1 != disks) - BUG(); - compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]); - uptodate++; - } else if ( failed_needupdate[0] & failed_needupdate[1] ) { - if (uptodate+2 != disks) - BUG(); - compute_block_2(sh, failed_num[0], failed_num[1]); - uptodate += 2; + /* If !tmp_page, we cannot do the calculations, + * but as we have set STRIPE_HANDLE, we will soon be called + * by stripe_handle with a tmp_page - just wait until then. + */ + if (tmp_page) { + if (failed == q_failed) { + /* The only possible failed device holds 'Q', so it makes + * sense to check P (If anything else were failed, we would + * have used P to recreate it). 
+ */ + compute_block_1(sh, pd_idx, 1); + if (!page_is_zero(sh->dev[pd_idx].page)) { + compute_block_1(sh,pd_idx,0); + update_p = 1; + } + } + if (!q_failed && failed < 2) { + /* q is not failed, and we didn't use it to generate + * anything, so it makes sense to check it + */ + memcpy(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE); + compute_parity(sh, UPDATE_PARITY); + if (memcmp(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE)!= 0) { + clear_bit(STRIPE_INSYNC, &sh->state); + update_q = 1; + } + } + if (update_p || update_q) { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! */ + update_p = update_q = 0; } -#else - compute_block_2(sh, failed_num[0], failed_num[1]); - uptodate += failed_needupdate[0] + failed_needupdate[1]; -#endif - if (uptodate != disks) - BUG(); + /* now write out any block on a failed drive, + * or P or Q if they need it + */ - PRINTK("Marking for sync stripe %llu blocks %d,%d\n", - (unsigned long long)sh->sector, failed_num[0], failed_num[1]); + if (failed == 2) { + dev = &sh->dev[failed_num[1]]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (failed >= 1) { + dev = &sh->dev[failed_num[0]]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } - /**** FIX: Should we really do both of these unconditionally? ****/ - adev = &sh->dev[failed_num[0]]; - locked += !test_bit(R5_LOCKED, &adev->flags); - set_bit(R5_LOCKED, &adev->flags); - set_bit(R5_Wantwrite, &adev->flags); - bdev = &sh->dev[failed_num[1]]; - locked += !test_bit(R5_LOCKED, &bdev->flags); - set_bit(R5_LOCKED, &bdev->flags); + if (update_p) { + dev = &sh->dev[pd_idx]; + locked ++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (update_q) { + dev = &sh->dev[qd_idx]; + locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } clear_bit(STRIPE_DEGRADED, &sh->state); - set_bit(R5_Wantwrite, &bdev->flags); set_bit(STRIPE_INSYNC, &sh->state); - set_bit(R5_Syncio, &adev->flags); - set_bit(R5_Syncio, &bdev->flags); } } + if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); clear_bit(STRIPE_SYNCING, &sh->state); } + /* If the failed drives are just a ReadError, then we might need + * to progress the repair/check process + */ + if (failed <= 2 && ! 
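In the check logic above, compute_block_1(sh, pd_idx, 1) XORs the data blocks over the parity buffer without pre-zeroing it, so the result is all-zero exactly when the on-disk P was consistent, which is what the page_is_zero() test then verifies. The same idea in standalone form for a plain XOR parity block; Q would use the GF(2^8) syndrome instead and is not shown.

    /*
     * XOR-parity self-check: XORing the stored parity with every data
     * block yields zero iff P was consistent.
     */
    #include <stdio.h>
    #include <string.h>

    #define STRIPE_SIZE 4096
    #define NDATA       3

    static int parity_ok(unsigned char data[NDATA][STRIPE_SIZE],
                         const unsigned char *parity)
    {
        unsigned char acc[STRIPE_SIZE];
        size_t i;
        int d;

        memcpy(acc, parity, STRIPE_SIZE);
        for (d = 0; d < NDATA; d++)
            for (i = 0; i < STRIPE_SIZE; i++)
                acc[i] ^= data[d][i];

        for (i = 0; i < STRIPE_SIZE; i++)    /* acc == 0 <=> P correct */
            if (acc[i])
                return 0;
        return 1;
    }

    int main(void)
    {
        static unsigned char data[NDATA][STRIPE_SIZE];
        static unsigned char parity[STRIPE_SIZE];
        size_t i;
        int d;

        data[0][7] = 0xaa;
        data[2][7] = 0x55;
        for (d = 0; d < NDATA; d++)          /* build a correct P */
            for (i = 0; i < STRIPE_SIZE; i++)
                parity[i] ^= data[d][i];

        printf("fresh parity : %d\n", parity_ok(data, parity));
        data[1][9] ^= 1;                     /* flip one data bit */
        printf("after damage : %d\n", parity_ok(data, parity));
        return 0;
    }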
conf->mddev->ro) + for (i=0; i<failed;i++) { + dev = &sh->dev[failed_num[i]]; + if (test_bit(R5_ReadError, &dev->flags) + && !test_bit(R5_LOCKED, &dev->flags) + && test_bit(R5_UPTODATE, &dev->flags) + ) { + if (!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } + } + } spin_unlock(&sh->lock); while ((bi=return_bi)) { @@ -1472,7 +1545,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_unlock(); if (rdev) { - if (test_bit(R5_Syncio, &sh->dev[i].flags)) + if (syncing) md_sync_acct(rdev->bdev, STRIPE_SECTORS); bi->bi_bdev = rdev->bdev; @@ -1489,6 +1562,9 @@ static void handle_stripe(struct stripe_head *sh) bi->bi_io_vec[0].bv_offset = 0; bi->bi_size = STRIPE_SIZE; bi->bi_next = NULL; + if (rw == WRITE && + test_bit(R5_ReWrite, &sh->dev[i].flags)) + atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); generic_make_request(bi); } else { if (rw == 1) @@ -1664,7 +1740,7 @@ static int make_request (request_queue_t *q, struct bio * bi) } finish_wait(&conf->wait_for_overlap, &w); raid6_plug_device(conf); - handle_stripe(sh); + handle_stripe(sh, NULL); release_stripe(sh); } else { /* cannot get stripe for read-ahead, just give-up */ @@ -1728,6 +1804,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return rv; } if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { /* we can skip this block, and probably more */ sync_blocks /= STRIPE_SECTORS; @@ -1765,7 +1842,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i clear_bit(STRIPE_INSYNC, &sh->state); spin_unlock(&sh->lock); - handle_stripe(sh); + handle_stripe(sh, NULL); release_stripe(sh); return STRIPE_SECTORS; @@ -1821,7 +1898,7 @@ static void raid6d (mddev_t *mddev) spin_unlock_irq(&conf->device_lock); handled++; - handle_stripe(sh); + handle_stripe(sh, conf->spare_page); release_stripe(sh); spin_lock_irq(&conf->device_lock); @@ -1848,17 +1925,19 @@ static int run(mddev_t *mddev) return -EIO; } - mddev->private = kmalloc (sizeof (raid6_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + mddev->private = kzalloc(sizeof (raid6_conf_t) + + mddev->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; - memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); conf->mddev = mddev; - if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) + if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) + goto abort; + + conf->spare_page = alloc_page(GFP_KERNEL); + if (!conf->spare_page) goto abort; - memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE); spin_lock_init(&conf->device_lock); init_waitqueue_head(&conf->wait_for_stripe); @@ -1929,13 +2008,18 @@ static int run(mddev_t *mddev) goto abort; } -#if 0 /* FIX: For now */ if (mddev->degraded > 0 && mddev->recovery_cp != MaxSector) { - printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev)); - goto abort; + if (mddev->ok_start_degraded) + printk(KERN_WARNING "raid6: starting dirty degraded array:%s" + "- data corruption possible.\n", + mdname(mddev)); + else { + printk(KERN_ERR "raid6: cannot start dirty degraded 
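The block above drives a two-step recovery for a sector that produced a read error but whose content could be recomputed: first write the recomputed data back (R5_Wantwrite plus R5_ReWrite), then read it back to confirm the medium accepted it (R5_Wantread). A tiny state machine capturing that sequencing; states and names are invented for illustration.

    /*
     * The write-then-verify sequencing behind R5_ReWrite.
     */
    #include <stdio.h>

    enum step { SAW_READ_ERROR, REWRITTEN, VERIFIED, DEVICE_FAILED };

    static enum step next_step(enum step s, int io_ok)
    {
        switch (s) {
        case SAW_READ_ERROR:        /* R5_ReadError set, R5_ReWrite clear */
            return REWRITTEN;       /* schedule write of recomputed data */
        case REWRITTEN:             /* R5_ReWrite set: read it back */
            return io_ok ? VERIFIED        /* clears both flags */
                         : DEVICE_FAILED;  /* "NOT corrected": md_error() */
        default:
            return s;
        }
    }

    int main(void)
    {
        enum step s = SAW_READ_ERROR;

        s = next_step(s, 1);    /* write the corrected block */
        s = next_step(s, 1);    /* read-back succeeds */
        printf("recovered: %s\n", s == VERIFIED ? "yes" : "no");
        return 0;
    }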
array" + " for %s\n", mdname(mddev)); + goto abort; + } } -#endif { mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6"); @@ -1977,7 +2061,7 @@ static int run(mddev_t *mddev) */ { int stripe = (mddev->raid_disks-2) * mddev->chunk_size - / PAGE_CACHE_SIZE; + / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } @@ -1985,18 +2069,14 @@ static int run(mddev_t *mddev) /* Ok, everything is just fine now */ mddev->array_size = mddev->size * (mddev->raid_disks - 2); - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - mddev->queue->unplug_fn = raid6_unplug_device; mddev->queue->issue_flush_fn = raid6_issue_flush; return 0; abort: if (conf) { print_raid6_conf(conf); - if (conf->stripe_hashtbl) - free_pages((unsigned long) conf->stripe_hashtbl, - HASH_PAGES_ORDER); + safe_put_page(conf->spare_page); + kfree(conf->stripe_hashtbl); kfree(conf); } mddev->private = NULL; @@ -2013,7 +2093,7 @@ static int stop (mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; shrink_stripes(conf); - free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); + kfree(conf->stripe_hashtbl); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ kfree(conf); mddev->private = NULL; @@ -2040,12 +2120,13 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh) static void printall (struct seq_file *seq, raid6_conf_t *conf) { struct stripe_head *sh; + struct hlist_node *hn; int i; spin_lock_irq(&conf->device_lock); for (i = 0; i < NR_HASH; i++) { sh = conf->stripe_hashtbl[i]; - for (; sh; sh = sh->hash_next) { + hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { if (sh->raid_conf != conf) continue; print_sh(seq, sh); @@ -2223,17 +2304,12 @@ static void raid6_quiesce(mddev_t *mddev, int state) spin_unlock_irq(&conf->device_lock); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } -static mdk_personality_t raid6_personality= + +static struct mdk_personality raid6_personality = { .name = "raid6", + .level = 6, .owner = THIS_MODULE, .make_request = make_request, .run = run, @@ -2248,7 +2324,7 @@ static mdk_personality_t raid6_personality= .quiesce = raid6_quiesce, }; -static int __init raid6_init (void) +static int __init raid6_init(void) { int e; @@ -2256,15 +2332,17 @@ static int __init raid6_init (void) if ( e ) return e; - return register_md_personality (RAID6, &raid6_personality); + return register_md_personality(&raid6_personality); } static void raid6_exit (void) { - unregister_md_personality (RAID6); + unregister_md_personality(&raid6_personality); } module_init(raid6_init); module_exit(raid6_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-8"); /* RAID6 */ +MODULE_ALIAS("md-raid6"); +MODULE_ALIAS("md-level-6"); diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c index ddf184f..6861d40 100644 --- a/drivers/media/video/cpia_pp.c +++ b/drivers/media/video/cpia_pp.c @@ -170,16 +170,9 @@ static size_t cpia_read_nibble (struct parport *port, /* Does the error line indicate end of data? */ if (((i /*& 1*/) == 0) && (parport_read_status(port) & PARPORT_STATUS_ERROR)) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DBG("%s: No more nibble data (%d bytes)\n", - port->name, i/2); - - /* Go to reverse idle phase. 
*/ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + DBG("%s: No more nibble data (%d bytes)\n", + port->name, i/2); + goto end_of_data; } /* Event 7: Set nAutoFd low. */ @@ -227,18 +220,21 @@ static size_t cpia_read_nibble (struct parport *port, byte = nibble; } - i /= 2; /* i is now in bytes */ - if (i == len) { /* Read the last nibble without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + /* Go to reverse idle phase. */ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } - return i; + return i/2; } /* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1) diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig index 43a942a..fef6771 100644 --- a/drivers/message/i2o/Kconfig +++ b/drivers/message/i2o/Kconfig @@ -24,6 +24,18 @@ config I2O If unsure, say N. +config I2O_LCT_NOTIFY_ON_CHANGES + bool "Enable LCT notification" + depends on I2O + default y + ---help--- + Only say N here if you have a I2O controller from SUN. The SUN + firmware doesn't support LCT notification on changes. If this option + is enabled on such a controller the driver will hang up in a endless + loop. On all other controllers say Y. + + If unsure, say Y. + config I2O_EXT_ADAPTEC bool "Enable Adaptec extensions" depends on I2O diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c index 151b228..ac06f10 100644 --- a/drivers/message/i2o/bus-osm.c +++ b/drivers/message/i2o/bus-osm.c @@ -17,7 +17,7 @@ #include <linux/i2o.h> #define OSM_NAME "bus-osm" -#define OSM_VERSION "$Rev$" +#define OSM_VERSION "1.317" #define OSM_DESCRIPTION "I2O Bus Adapter OSM" static struct i2o_driver i2o_bus_driver; @@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = { */ static int i2o_bus_scan(struct i2o_device *dev) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) return -ETIMEDOUT; - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data. + tid); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); }; /** @@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev) * * Returns count. 
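The i2o conversion beginning above retires the I2O_QUEUE_EMPTY sentinel: i2o_msg_get_wait() now returns either a valid message pointer or an errno encoded with ERR_PTR(), tested with IS_ERR(). A minimal standalone version of that encoding, simplified from linux/err.h: error values live in the last page of the address space, so one pointer return carries both cases.

    /*
     * Minimal ERR_PTR()/IS_ERR()/PTR_ERR() encoding, simplified from
     * linux/err.h.
     */
    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static long PTR_ERR(const void *ptr)
    {
        return (long)ptr;
    }

    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct msg { int id; };

    static struct msg *msg_get(int queue_empty)
    {
        static struct msg m = { 42 };

        if (queue_empty)
            return ERR_PTR(-ETIMEDOUT);   /* instead of a magic sentinel */
        return &m;
    }

    int main(void)
    {
        struct msg *msg = msg_get(1);

        if (IS_ERR(msg))
            printf("error %ld\n", PTR_ERR(msg));   /* -ETIMEDOUT */
        else
            printf("got message %d\n", msg->id);
        return 0;
    }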
*/ -static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, - size_t count) +static ssize_t i2o_bus_store_scan(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) { struct i2o_device *i2o_dev = to_i2o_device(d); int rc; diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c index 10432f6..3bba7aa 100644 --- a/drivers/message/i2o/config-osm.c +++ b/drivers/message/i2o/config-osm.c @@ -22,7 +22,7 @@ #include <asm/uaccess.h> #define OSM_NAME "config-osm" -#define OSM_VERSION "1.248" +#define OSM_VERSION "1.323" #define OSM_DESCRIPTION "I2O Configuration OSM" /* access mode user rw */ diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h index 9eefedb..9062856 100644 --- a/drivers/message/i2o/core.h +++ b/drivers/message/i2o/core.h @@ -14,8 +14,6 @@ */ /* Exec-OSM */ -extern struct bus_type i2o_bus_type; - extern struct i2o_driver i2o_exec_driver; extern int i2o_exec_lct_get(struct i2o_controller *); @@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void); extern void __exit i2o_exec_exit(void); /* driver */ +extern struct bus_type i2o_bus_type; + extern int i2o_driver_dispatch(struct i2o_controller *, u32); extern int __init i2o_driver_init(void); @@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void); extern void __exit i2o_pci_exit(void); /* device */ +extern struct device_attribute i2o_device_attrs[]; + extern void i2o_device_remove(struct i2o_device *); extern int i2o_device_parse_lct(struct i2o_controller *); /* IOP */ extern struct i2o_controller *i2o_iop_alloc(void); -extern void i2o_iop_free(struct i2o_controller *); + +/** + * i2o_iop_free - Free the i2o_controller struct + * @c: I2O controller to free + */ +static inline void i2o_iop_free(struct i2o_controller *c) +{ + i2o_pool_free(&c->in_msg); + kfree(c); +} extern int i2o_iop_add(struct i2o_controller *); extern void i2o_iop_remove(struct i2o_controller *); -/* config */ -extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); - /* control registers relative to c->base */ #define I2O_IRQ_STATUS 0x30 #define I2O_IRQ_MASK 0x34 diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c index 8eb50cd..ee18305 100644 --- a/drivers/message/i2o/device.c +++ b/drivers/message/i2o/device.c @@ -35,18 +35,18 @@ static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, u32 type) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); - writel(type, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid); + msg->body[0] = cpu_to_le32(type); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); } /** @@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev) return rc; } - /** * i2o_device_release - release the memory for a I2O device * @dev: I2O device which should be released @@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev) kfree(i2o_dev); } - /** - * i2o_device_class_show_class_id - Displays class id of I2O device - * @cd: 
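The writel()-to-assignment rewrites above and below mean I2O messages are now composed in ordinary memory in the bus's little-endian format via cpu_to_le32(), and parsed back with le32_to_cpu(), as the reworked LCT parser later does with its 'buf >> 16 & 0xfff' style field extraction. A portable sketch of one header word being packed and unpacked; the command value 0x41 is a made-up placeholder, while the cmd<<24 | initiator<<12 | target layout comes from the hunks themselves.

    /*
     * Packing one I2O header word in little-endian wire format.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define HOST_TID 1

    static uint32_t cpu_to_le32(uint32_t v)
    {
        unsigned char b[4] = { v & 0xff, (v >> 8) & 0xff,
                               (v >> 16) & 0xff, (v >> 24) & 0xff };
        uint32_t out;

        memcpy(&out, b, 4);   /* no-op on LE hosts, byte swap on BE */
        return out;
    }

    /* a 32-bit byte swap is its own inverse, so the same routine
     * serves as le32_to_cpu() */
    #define le32_to_cpu cpu_to_le32

    int main(void)
    {
        uint32_t cmd = 0x41, tid = 0x123;   /* placeholder values */
        uint32_t head1, host;

        head1 = cpu_to_le32(cmd << 24 | HOST_TID << 12 | tid);

        host = le32_to_cpu(head1);          /* parse it back */
        printf("cmd=0x%02x initiator=%u target=0x%03x\n",
               (unsigned)(host >> 24),
               (unsigned)((host >> 12) & 0xfff),
               (unsigned)(host & 0xfff));
        return 0;
    }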
class device of which the class id should be displayed + * i2o_device_show_class_id - Displays class id of I2O device + * @dev: device of which the class id should be displayed + * @attr: pointer to device attribute * @buf: buffer into which the class id should be printed * * Returns the number of bytes which are printed into the buffer. @@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev, } /** - * i2o_device_class_show_tid - Displays TID of I2O device - * @cd: class device of which the TID should be displayed - * @buf: buffer into which the class id should be printed + * i2o_device_show_tid - Displays TID of I2O device + * @dev: device of which the TID should be displayed + * @attr: pointer to device attribute + * @buf: buffer into which the TID should be printed * * Returns the number of bytes which are printed into the buffer. */ static ssize_t i2o_device_show_tid(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, char *buf) { struct i2o_device *i2o_dev = to_i2o_device(dev); @@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev, return strlen(buf) + 1; } +/* I2O device attributes */ struct device_attribute i2o_device_attrs[] = { __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL), @@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void) { struct i2o_device *dev; - dev = kmalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(*dev)); - INIT_LIST_HEAD(&dev->list); init_MUTEX(&dev->lock); @@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void) } /** - * i2o_setup_sysfs_links - Adds attributes to the I2O device - * @cd: I2O class device which is added to the I2O device class - * - * This function get called when a I2O device is added to the class. It - * creates the attributes for each device and creates user/parent symlink - * if necessary. - * - * Returns 0 on success or negative error code on failure. 
- */ -static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev) -{ - struct i2o_controller *c = i2o_dev->iop; - struct i2o_device *tmp; - - /* create user entries for this device */ - tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); - if (tmp && tmp != i2o_dev) - sysfs_create_link(&i2o_dev->device.kobj, - &tmp->device.kobj, "user"); - - /* create user entries refering to this device */ - list_for_each_entry(tmp, &c->devices, list) - if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid && - tmp != i2o_dev) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "user"); - - /* create parent entries for this device */ - tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); - if (tmp && tmp != i2o_dev) - sysfs_create_link(&i2o_dev->device.kobj, - &tmp->device.kobj, "parent"); - - /* create parent entries refering to this device */ - list_for_each_entry(tmp, &c->devices, list) - if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid && - tmp != i2o_dev) - sysfs_create_link(&tmp->device.kobj, - &i2o_dev->device.kobj, "parent"); -} - -static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev) -{ - struct i2o_controller *c = i2o_dev->iop; - struct i2o_device *tmp; - - sysfs_remove_link(&i2o_dev->device.kobj, "parent"); - sysfs_remove_link(&i2o_dev->device.kobj, "user"); - - list_for_each_entry(tmp, &c->devices, list) { - if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) - sysfs_remove_link(&tmp->device.kobj, "parent"); - if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) - sysfs_remove_link(&tmp->device.kobj, "user"); - } -} - - - -/** * i2o_device_add - allocate a new I2O device and add it to the IOP * @iop: I2O controller where the device is on * @entry: LCT entry of the I2O device @@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev) static struct i2o_device *i2o_device_add(struct i2o_controller *c, i2o_lct_entry * entry) { - struct i2o_device *dev; + struct i2o_device *i2o_dev, *tmp; - dev = i2o_device_alloc(); - if (IS_ERR(dev)) { + i2o_dev = i2o_device_alloc(); + if (IS_ERR(i2o_dev)) { printk(KERN_ERR "i2o: unable to allocate i2o device\n"); - return dev; + return i2o_dev; } - dev->lct_data = *entry; - dev->iop = c; + i2o_dev->lct_data = *entry; - snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, - dev->lct_data.tid); + snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, + i2o_dev->lct_data.tid); - dev->device.parent = &c->device; + i2o_dev->iop = c; + i2o_dev->device.parent = &c->device; - device_register(&dev->device); + device_register(&i2o_dev->device); - list_add_tail(&dev->list, &c->devices); + list_add_tail(&i2o_dev->list, &c->devices); - i2o_setup_sysfs_links(dev); + /* create user entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); + if (tmp && (tmp != i2o_dev)) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + "user"); - i2o_driver_notify_device_add_all(dev); + /* create user entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "user"); - pr_debug("i2o: device %s added\n", dev->device.bus_id); + /* create parent entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); + if (tmp && (tmp != i2o_dev)) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + "parent"); - return dev; + /* create 
parent entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + && (tmp != i2o_dev)) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "parent"); + + i2o_driver_notify_device_add_all(i2o_dev); + + pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id); + + return i2o_dev; } /** @@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c, */ void i2o_device_remove(struct i2o_device *i2o_dev) { + struct i2o_device *tmp; + struct i2o_controller *c = i2o_dev->iop; + i2o_driver_notify_device_remove_all(i2o_dev); - i2o_remove_sysfs_links(i2o_dev); + + sysfs_remove_link(&i2o_dev->device.kobj, "parent"); + sysfs_remove_link(&i2o_dev->device.kobj, "user"); + + list_for_each_entry(tmp, &c->devices, list) { + if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "parent"); + if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "user"); + } list_del(&i2o_dev->list); + device_unregister(&i2o_dev->device); } @@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c) { struct i2o_device *dev, *tmp; i2o_lct *lct; - int i; - int max; + u32 *dlct = c->dlct.virt; + int max = 0, i = 0; + u16 table_size; + u32 buf; down(&c->lct_lock); kfree(c->lct); - lct = c->dlct.virt; + buf = le32_to_cpu(*dlct++); + table_size = buf & 0xffff; - c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); - if (!c->lct) { + lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL); + if (!lct) { up(&c->lct_lock); return -ENOMEM; } - if (lct->table_size * 4 > c->dlct.len) { - memcpy(c->lct, c->dlct.virt, c->dlct.len); - up(&c->lct_lock); - return -EAGAIN; - } + lct->lct_ver = buf >> 28; + lct->boot_tid = buf >> 16 & 0xfff; + lct->table_size = table_size; + lct->change_ind = le32_to_cpu(*dlct++); + lct->iop_flags = le32_to_cpu(*dlct++); - memcpy(c->lct, c->dlct.virt, lct->table_size * 4); - - lct = c->lct; - - max = (lct->table_size - 3) / 9; + table_size -= 3; pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, lct->table_size); - /* remove devices, which are not in the LCT anymore */ - list_for_each_entry_safe(dev, tmp, &c->devices, list) { + while (table_size > 0) { + i2o_lct_entry *entry = &lct->lct_entry[max]; int found = 0; - for (i = 0; i < max; i++) { - if (lct->lct_entry[i].tid == dev->lct_data.tid) { + buf = le32_to_cpu(*dlct++); + entry->entry_size = buf & 0xffff; + entry->tid = buf >> 16 & 0xfff; + + entry->change_ind = le32_to_cpu(*dlct++); + entry->device_flags = le32_to_cpu(*dlct++); + + buf = le32_to_cpu(*dlct++); + entry->class_id = buf & 0xfff; + entry->version = buf >> 12 & 0xf; + entry->vendor_id = buf >> 16; + + entry->sub_class = le32_to_cpu(*dlct++); + + buf = le32_to_cpu(*dlct++); + entry->user_tid = buf & 0xfff; + entry->parent_tid = buf >> 12 & 0xfff; + entry->bios_info = buf >> 24; + + memcpy(&entry->identity_tag, dlct, 8); + dlct += 2; + + entry->event_capabilities = le32_to_cpu(*dlct++); + + /* add new devices, which are new in the LCT */ + list_for_each_entry_safe(dev, tmp, &c->devices, list) { + if (entry->tid == dev->lct_data.tid) { found = 1; break; } } if (!found) - i2o_device_remove(dev); + i2o_device_add(c, entry); + + table_size -= 9; + max++; } - /* add new devices, which are new in the LCT */ - for (i = 0; i < max; i++) { + /* remove devices, which are not in the LCT anymore */ + list_for_each_entry_safe(dev, tmp, &c->devices, list) { int found = 0; - 
list_for_each_entry_safe(dev, tmp, &c->devices, list) { + for (i = 0; i < max; i++) { if (lct->lct_entry[i].tid == dev->lct_data.tid) { found = 1; break; @@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c) } if (!found) - i2o_device_add(c, &lct->lct_entry[i]); + i2o_device_remove(dev); } + up(&c->lct_lock); return 0; } - /* * Run time support routines */ @@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c) * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. */ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, - int oplen, void *reslist, int reslen) + int oplen, void *reslist, int reslen) { - struct i2o_message __iomem *msg; - u32 m; - u32 *res32 = (u32 *) reslist; - u32 *restmp = (u32 *) reslist; - int len = 0; + struct i2o_message *msg; int i = 0; int rc; struct i2o_dma res; @@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) return -ENOMEM; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) { + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) { i2o_dma_free(dev, &res); - return -ETIMEDOUT; + return PTR_ERR(msg); } i = 0; - writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, - &msg->u.head[1]); - writel(0, &msg->body[i++]); - writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ - memcpy_toio(&msg->body[i], oplist, oplen); + msg->u.head[1] = + cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid); + msg->body[i++] = cpu_to_le32(0x00000000); + msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */ + memcpy(&msg->body[i], oplist, oplen); i += (oplen / 4 + (oplen % 4 ? 1 : 0)); - writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ - writel(res.phys, &msg->body[i++]); + msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */ + msg->body[i++] = cpu_to_le32(res.phys); - writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | - SGL_OFFSET_5, &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | + SGL_OFFSET_5); - rc = i2o_msg_post_wait_mem(c, m, 10, &res); + rc = i2o_msg_post_wait_mem(c, msg, 10, &res); /* This only looks like a memory leak - don't "fix" it. */ if (rc == -ETIMEDOUT) @@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, memcpy(reslist, res.virt, res.len); i2o_dma_free(dev, &res); - /* Query failed */ - if (rc) - return rc; - /* - * Calculate number of bytes of Result LIST - * We need to loop through each Result BLOCK and grab the length - */ - restmp = res32 + 1; - len = 1; - for (i = 0; i < (res32[0] & 0X0000FFFF); i++) { - if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */ - printk(KERN_WARNING - "%s - Error:\n ErrorInfoSize = 0x%02x, " - "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", - (cmd == - I2O_CMD_UTIL_PARAMS_SET) ? 
"PARAMS_SET" : - "PARAMS_GET", res32[1] >> 24, - (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF); - - /* - * If this is the only request,than we return an error - */ - if ((res32[0] & 0x0000FFFF) == 1) { - return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */ - } - } - len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */ - restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */ - } - return (len << 2); /* bytes used by result list */ + return rc; } /* @@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, void *buf, int buflen) { - u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; + u32 opblk[] = { cpu_to_le32(0x00000001), + cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET), + cpu_to_le32((s16) field << 16 | 0x00000001) + }; u8 *resblk; /* 8 bytes for header */ - int size; - - if (field == -1) /* whole group */ - opblk[4] = -1; + int rc; resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); if (!resblk) return -ENOMEM; - size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, - sizeof(opblk), resblk, buflen + 8); + rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, + sizeof(opblk), resblk, buflen + 8); memcpy(buf, resblk + 8, buflen); /* cut off header */ kfree(resblk); - if (size > buflen) - return buflen; - - return size; + return rc; } /* @@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, * else return specific fields * ibuf contains fieldindexes * - * if oper == I2O_PARAMS_LIST_GET, get from specific rows - * if fieldcount == -1 return all fields + * if oper == I2O_PARAMS_LIST_GET, get from specific rows + * if fieldcount == -1 return all fields * ibuf contains rowcount, keyvalues - * else return specific fields + * else return specific fields * fieldcount is # of fieldindexes - * ibuf contains fieldindexes, rowcount, keyvalues + * ibuf contains fieldindexes, rowcount, keyvalues * * You could also use directly function i2o_issue_params(). 
*/ diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 0fb9c4e..6413022 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c @@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv) }; /* I2O bus type */ -extern struct device_attribute i2o_device_attrs[]; - struct bus_type i2o_bus_type = { .name = "i2o", .match = i2o_bus_match, - .dev_attrs = i2o_device_attrs, + .dev_attrs = i2o_device_attrs }; /** @@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) /* cut of header from message size (in 32-bit words) */ size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; - evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); + evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); if (!evt) return -ENOMEM; evt->size = size; evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); evt->event_indicator = le32_to_cpu(msg->body[0]); - memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); + memcpy(&evt->data, &msg->body[1], size * 4); list_for_each_entry_safe(dev, tmp, &c->devices, list) if (dev->lct_data.tid == tid) { @@ -349,12 +347,10 @@ int __init i2o_driver_init(void) osm_info("max drivers = %d\n", i2o_max_drivers); i2o_drivers = - kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); + kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); if (!i2o_drivers) return -ENOMEM; - memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers)); - rc = bus_register(&i2o_bus_type); if (rc < 0) diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index 9c339a2..9bb9859 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c @@ -33,7 +33,7 @@ #include <linux/workqueue.h> #include <linux/string.h> #include <linux/slab.h> -#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ +#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ #include <asm/param.h> /* HZ */ #include "core.h" @@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void) { struct i2o_exec_wait *wait; - wait = kmalloc(sizeof(*wait), GFP_KERNEL); + wait = kzalloc(sizeof(*wait), GFP_KERNEL); if (!wait) - return ERR_PTR(-ENOMEM); - - memset(wait, 0, sizeof(*wait)); + return NULL; INIT_LIST_HEAD(&wait->list); @@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait) * Returns 0 on success, negative error code on timeout or positive error * code from reply. */ -int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long - timeout, struct i2o_dma *dma) +int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, + unsigned long timeout, struct i2o_dma *dma) { DECLARE_WAIT_QUEUE_HEAD(wq); struct i2o_exec_wait *wait; static u32 tcntxt = 0x80000000; - struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); int rc = 0; wait = i2o_exec_wait_alloc(); @@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long * We will only use transaction contexts >= 0x80000000 for POST WAIT, * so we could find a POST WAIT reply easier in the reply handler. */ - writel(i2o_exec_driver.context, &msg->u.s.icntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); wait->tcntxt = tcntxt++; - writel(wait->tcntxt, &msg->u.s.tcntxt); + msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); /* * Post the message to the controller. At some point later it will * return. If we time out before it returns then complete will be zero. 
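i2o_msg_post_wait_mem() above draws transaction contexts for synchronous requests from a counter starting at 0x80000000 so, per the comment, the reply handler can spot a POST WAIT reply easily. Presumably that means testing the top bit, as this minimal model does; it is not thread-safe, and the kernel increments the counter under its own locking.

    /*
     * Transaction contexts for synchronous requests start at
     * 0x80000000, so a reply can be classified by its top bit.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t tcntxt = 0x80000000;

    static uint32_t new_post_wait_context(void)
    {
        return tcntxt++;
    }

    static const char *classify(uint32_t context)
    {
        return (context & 0x80000000) ? "POST WAIT reply" : "other reply";
    }

    int main(void)
    {
        uint32_t a = new_post_wait_context();

        printf("%#x: %s\n", (unsigned)a, classify(a));
        printf("%#x: %s\n", 0x17u, classify(0x17u));
        return 0;
    }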
*/ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); if (!wait->complete) { wait->wq = &wq; @@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, * * Returns number of bytes printed into buffer. */ -static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) +static ssize_t i2o_exec_show_vendor_id(struct device *d, + struct device_attribute *attr, char *buf) { struct i2o_device *dev = to_i2o_device(d); u16 id; - if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { - sprintf(buf, "0x%04x", id); + if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { + sprintf(buf, "0x%04x", le16_to_cpu(id)); return strlen(buf) + 1; } @@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute * * Returns number of bytes printed into buffer. */ -static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) +static ssize_t i2o_exec_show_product_id(struct device *d, + struct device_attribute *attr, + char *buf) { struct i2o_device *dev = to_i2o_device(d); u16 id; - if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { - sprintf(buf, "0x%04x", id); + if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { + sprintf(buf, "0x%04x", le16_to_cpu(id)); return strlen(buf) + 1; } @@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c) if (i2o_device_parse_lct(c) != -EAGAIN) change_ind = c->lct->change_ind + 1; +#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES i2o_exec_lct_notify(c, change_ind); +#endif }; /** @@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, u32 context; if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { + struct i2o_message __iomem *pmsg; + u32 pm; + /* * If Fail bit is set we must take the transaction context of * the preserved message to find the right request again. 
*/ - struct i2o_message __iomem *pmsg; - u32 pm; pm = le32_to_cpu(msg->body[3]); - pmsg = i2o_msg_in_to_virt(c, pm); + context = readl(&pmsg->u.s.tcntxt); i2o_report_status(KERN_INFO, "i2o_core", msg); - context = readl(&pmsg->u.s.tcntxt); - /* Release the preserved msg */ - i2o_msg_nop(c, pm); + i2o_msg_nop_mfa(c, pm); } else context = le32_to_cpu(msg->u.s.tcntxt); @@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt) */ int i2o_exec_lct_get(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int i = 0; int rc = -EAGAIN; for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0xffffffff, &msg->body[0]); - writel(0x00000000, &msg->body[1]); - writel(0xd0000000 | c->dlct.len, &msg->body[2]); - writel(c->dlct.phys, &msg->body[3]); - - rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = + cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->body[0] = cpu_to_le32(0xffffffff); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); + msg->body[3] = cpu_to_le32(c->dlct.phys); + + rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET); if (rc < 0) break; @@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) { i2o_status_block *sb = c->status_block.virt; struct device *dev; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; dev = &c->pdev->dev; - if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) + if (i2o_dma_realloc + (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL)) return -ENOMEM; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); /* FIXME */ - writel(0xffffffff, &msg->body[0]); - writel(change_ind, &msg->body[1]); - writel(0xd0000000 | c->dlct.len, &msg->body[2]); - writel(c->dlct.phys, &msg->body[3]); - - i2o_msg_post(c, m); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(0xffffffff); + msg->body[1] = cpu_to_le32(change_ind); + msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); + msg->body[3] = cpu_to_le32(c->dlct.phys); + + i2o_msg_post(c, msg); return 0; }; diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 4f52252..5b1febe 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -59,10 +59,12 @@ #include <linux/blkdev.h> #include <linux/hdreg.h> +#include <scsi/scsi.h> + #include "i2o_block.h" #define OSM_NAME "block-osm" -#define OSM_VERSION "1.287" 
+#define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O Block Device OSM" static struct i2o_driver i2o_block_driver; @@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev) */ static int i2o_block_device_flush(struct i2o_device *dev) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(60 << 16, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(60 << 16); osm_debug("Flushing...\n"); - return i2o_msg_post_wait(dev->iop, m, 60); + return i2o_msg_post_wait(dev->iop, msg, 60); }; /** @@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, */ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; - - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(-1, &msg->body[0]); - writel(0, &msg->body[1]); + struct i2o_message *msg; + + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(-1); + msg->body[1] = cpu_to_le32(0x00000000); osm_debug("Mounting...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) */ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg) == I2O_QUEUE_EMPTY) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(-1, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(-1); osm_debug("Locking...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) */ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + 
return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, - &msg->u.head[1]); - writel(media_id, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(media_id); osm_debug("Unlocking...\n"); - return i2o_msg_post_wait(dev->iop, m, 2); + return i2o_msg_post_wait(dev->iop, msg, 2); }; /** @@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) { struct i2o_device *i2o_dev = dev->i2o_dev; struct i2o_controller *c = i2o_dev->iop; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int rc; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. - tid, &msg->u.head[1]); - writel(op << 24, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev-> + lct_data.tid); + msg->body[0] = cpu_to_le32(op << 24); osm_debug("Power...\n"); - rc = i2o_msg_post_wait(c, m, 60); + rc = i2o_msg_post_wait(c, msg, 60); if (!rc) dev->power = op; @@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq) */ static inline int i2o_block_sglist_alloc(struct i2o_controller *c, struct i2o_block_request *ireq, - u32 __iomem ** mptr) + u32 ** mptr) { int nents; enum dma_data_direction direction; @@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req) struct i2o_block_device *dev = req->rq_disk->private_data; struct i2o_controller *c; int tid = dev->i2o_dev->lct_data.tid; - struct i2o_message __iomem *msg; - u32 __iomem *mptr; + struct i2o_message *msg; + u32 *mptr; struct i2o_block_request *ireq = req->special; - u32 m; u32 tcntxt; u32 sgl_offset = SGL_OFFSET_8; u32 ctl_flags = 0x00000000; @@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req) c = dev->i2o_dev->iop; - m = i2o_msg_get(c, &msg); - if (m == I2O_QUEUE_EMPTY) { - rc = -EBUSY; + msg = i2o_msg_get(c); + if (IS_ERR(msg)) { + rc = PTR_ERR(msg); goto exit; } @@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req) goto nop_msg; } - writel(i2o_block_driver.context, &msg->u.s.icntxt); - writel(tcntxt, &msg->u.s.tcntxt); + msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); + msg->u.s.tcntxt = cpu_to_le32(tcntxt); mptr = &msg->body[0]; @@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req) sgl_offset = SGL_OFFSET_12; - writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, - &msg->u.head[1]); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid); - writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); - writel(tid, mptr++); + *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); + *mptr++ = cpu_to_le32(tid); /* * ENABLE_DISCONNECT @@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req) * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME */ if (rq_data_dir(req) == READ) { - cmd[0] = 0x28; + cmd[0] = READ_10; scsi_flags = 0x60a0000a; } else { - cmd[0] = 0x2A; + cmd[0] = WRITE_10; scsi_flags = 0xa0a0000a; } - writel(scsi_flags, 
mptr++); + *mptr++ = cpu_to_le32(scsi_flags); *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); - memcpy_toio(mptr, cmd, 10); + memcpy(mptr, cmd, 10); mptr += 4; - writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); + *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); } else #endif { - writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); - writel(ctl_flags, mptr++); - writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); - writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); - writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); + msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); + *mptr++ = cpu_to_le32(ctl_flags); + *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); + *mptr++ = + cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); + *mptr++ = + cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); } if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { @@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req) goto context_remove; } - writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | - sgl_offset, &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); list_add_tail(&ireq->queue, &dev->open_queue); dev->open_queue_depth++; - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; @@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req) i2o_cntxt_list_remove(c, req); nop_msg: - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); exit: return rc; @@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void) struct request_queue *queue; int rc; - dev = kmalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { osm_err("Insufficient memory to allocate I2O Block disk.\n"); rc = -ENOMEM; goto exit; } - memset(dev, 0, sizeof(*dev)); INIT_LIST_HEAD(&dev->open_queue); spin_lock_init(&dev->lock); @@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev) int rc; u64 size; u32 blocksize; - u32 flags, status; u16 body_size = 4; + u16 power; unsigned short max_sectors; #ifdef CONFIG_I2O_EXT_ADAPTEC @@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev) * Ask for the current media data. 
If that isn't supported * then we ask for the device capacity data */ - if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || - i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { - blk_queue_hardsect_size(queue, blocksize); + if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || + !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { + blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); } else osm_warn("unable to get blocksize of %s\n", gd->disk_name); - if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || - i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { - set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); + if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || + !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { + set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT); } else osm_warn("could not get size of %s\n", gd->disk_name); - if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) - i2o_blk_dev->power = 0; - i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); - i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); + if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) + i2o_blk_dev->power = power; i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 3c3a7ab..89daf67 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -36,12 +36,12 @@ #include <asm/uaccess.h> -#include "core.h" - #define SG_TABLESIZE 30 -static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, - unsigned long arg); +extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); + +static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); static spinlock_t i2o_config_lock; @@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg) struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; @@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } __copy_from_user(buffer.virt, kxfer.buf, fragsize); - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); - writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | - (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); - writel(0xD0000000 | fragsize, &msg->body[3]); - writel(buffer.phys, &msg->body[4]); + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer. 
+ sw_type) << 16) | + (((u32) maxfrag) << 8) | (((u32) curfrag))); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); + msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); + msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != -ETIMEDOUT) i2o_dma_free(&c->pdev->dev, &buffer); @@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg) struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; int ret = 0; @@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); - writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((u32) kxfer.flags << 24 | (u32) kxfer. - sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, - &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); - writel(0xD0000000 | fragsize, &msg->body[3]); - writel(buffer.phys, &msg->body[4]); + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer. 
+ sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); + msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); + msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != I2O_POST_WAIT_OK) { if (status != -ETIMEDOUT) @@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg) struct i2o_controller *c; struct i2o_sw_xfer kxfer; struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int swlen; int token; @@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg) if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, - &msg->body[0]); - writel(swlen, &msg->body[1]); - writel(kxfer.sw_id, &msg->body[2]); + msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); + msg->body[0] = + cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16); + msg->body[1] = cpu_to_le32(swlen); + msg->body[2] = cpu_to_le32(kxfer.sw_id); - token = i2o_msg_post_wait(c, m, 10); + token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("swdel failed, DetailedStatus = %d\n", token); @@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg) { int token; int iop = (int)arg; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; struct i2o_controller *c; c = i2o_find_iop(iop); if (!c) return -ENXIO; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(0, &msg->u.head[3]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(0); - token = i2o_msg_post_wait(c, m, 10); + token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("Can't validate configuration, ErrorStatus = %d\n", @@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg) static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; struct i2o_evt_id kdesc; struct i2o_controller *c; @@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) if (!d) return -ENODEV; - m = 
i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -EBUSY; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, - &msg->u.head[1]); - writel(i2o_config_driver.context, &msg->u.head[2]); - writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); - writel(kdesc.evt_mask, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | + kdesc.tid); + msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); + msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data)); + msg->body[0] = cpu_to_le32(kdesc.evt_mask); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; } @@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, u32 sg_index = 0; i2o_status_block *sb; struct i2o_message *msg; - u32 m; unsigned int iop; cmd = (struct i2o_cmd_passthru32 __user *)arg; @@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, return -ENXIO; } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); sb = c->status_block.virt; @@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, reply_size >>= 16; reply_size <<= 2; - reply = kmalloc(reply_size, GFP_KERNEL); + reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); return -ENOMEM; } - memset(reply, 0, reply_size); sg_offset = (msg->u.head[0] >> 4) & 0x0f; - writel(i2o_config_driver.context, &msg->u.s.icntxt); - writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); - memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; @@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, goto cleanup; } sg_size = sg[i].flag_count & 0xffffff; - p = &(sg_list[sg_index++]); + p = &(sg_list[sg_index]); /* Allocate memory for the transfer */ if (i2o_dma_alloc (&c->pdev->dev, p, sg_size, @@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, rcode = -ENOMEM; goto sg_list_cleanup; } + sg_index++; /* Copy in the user's SG buffer if necessary */ if (sg[i]. 
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { @@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } - rcode = i2o_msg_post_wait(c, m, 60); - if (rcode) + rcode = i2o_msg_post_wait(c, msg, 60); + if (rcode) { + reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; + } if (sg_offset) { u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; @@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } + sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones @@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, } } - sg_list_cleanup: for (i = 0; i < sg_index; i++) i2o_dma_free(&c->pdev->dev, &sg_list[i]); @@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg) u32 i = 0; void *p = NULL; i2o_status_block *sb; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned int iop; if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) @@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg) return -ENXIO; } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); sb = c->status_block.virt; @@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg) reply_size >>= 16; reply_size <<= 2; - reply = kmalloc(reply_size, GFP_KERNEL); + reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); return -ENOMEM; } - memset(reply, 0, reply_size); sg_offset = (msg->u.head[0] >> 4) & 0x0f; - writel(i2o_config_driver.context, &msg->u.s.icntxt); - writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); - memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; @@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg) } } - rcode = i2o_msg_post_wait(c, m, 60); - if (rcode) + rcode = i2o_msg_post_wait(c, msg, 60); + if (rcode) { + reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; + } if (sg_offset) { u32 msg[128]; @@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg) } } + sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones @@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg) } } - sg_list_cleanup: for (i = 0; i < sg_index; i++) kfree(sg_list[i]); diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h index 561d633..6502b81 100644 --- a/drivers/message/i2o/i2o_lan.h +++ b/drivers/message/i2o/i2o_lan.h @@ -103,14 +103,14 @@ #define I2O_LAN_DSC_SUSPENDED 0x11 struct i2o_packet_info { - u32 offset : 24; - u32 flags : 8; - u32 len : 24; - u32 status : 8; + u32 offset:24; + u32 flags:8; + u32 len:24; + u32 status:8; }; struct i2o_bucket_descriptor { - u32 context; /* FIXME: 64bit support */ + u32 context; /* FIXME: 64bit support */ struct i2o_packet_info packet_info[1]; }; @@ -127,14 +127,14 @@ struct i2o_lan_local { u8 unit; struct i2o_device *i2o_dev; - struct fddi_statistics stats; /* see also struct net_device_stats */ - unsigned short (*type_trans)(struct sk_buff *, struct net_device *); - atomic_t buckets_out; /* nbr of unused buckets on DDM */ - atomic_t tx_out; /* outstanding TXes */ - u8 tx_count; /* packets in one TX message frame */ - u16 tx_max_out; /* DDM's Tx queue len */ - u8 sgl_max; /* max SGLs in one message frame */ - u32 m; /* IOP address of the batch 
msg frame */ + struct fddi_statistics stats; /* see also struct net_device_stats */ + unsigned short (*type_trans) (struct sk_buff *, struct net_device *); + atomic_t buckets_out; /* nbr of unused buckets on DDM */ + atomic_t tx_out; /* outstanding TXes */ + u8 tx_count; /* packets in one TX message frame */ + u16 tx_max_out; /* DDM's Tx queue len */ + u8 sgl_max; /* max SGLs in one message frame */ + u32 m; /* IOP address of the batch msg frame */ struct work_struct i2o_batch_send_task; int send_active; @@ -144,16 +144,16 @@ struct i2o_lan_local { spinlock_t tx_lock; - u32 max_size_mc_table; /* max number of multicast addresses */ + u32 max_size_mc_table; /* max number of multicast addresses */ /* LAN OSM configurable parameters are here: */ - u16 max_buckets_out; /* max nbr of buckets to send to DDM */ - u16 bucket_thresh; /* send more when this many used */ + u16 max_buckets_out; /* max nbr of buckets to send to DDM */ + u16 bucket_thresh; /* send more when this many used */ u16 rx_copybreak; - u8 tx_batch_mode; /* Set when using batch mode sends */ - u32 i2o_event_mask; /* To turn on interesting event flags */ + u8 tx_batch_mode; /* Set when using batch mode sends */ + u32 i2o_event_mask; /* To turn on interesting event flags */ }; -#endif /* _I2O_LAN_H */ +#endif /* _I2O_LAN_H */ diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index d559a17..2a0c42b 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c @@ -28,7 +28,7 @@ */ #define OSM_NAME "proc-osm" -#define OSM_VERSION "1.145" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O ProcFS OSM" #define I2O_MAX_MODULES 4 diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 9f1744c..f9e5a23 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -70,7 +70,7 @@ #include <scsi/sg_request.h> #define OSM_NAME "scsi-osm" -#define OSM_VERSION "1.282" +#define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" static struct i2o_driver i2o_scsi_driver; @@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) list_for_each_entry(i2o_dev, &c->devices, list) if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { - if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) && (type == 0x01)) /* SCSI bus */ max_channel++; } @@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) i = 0; list_for_each_entry(i2o_dev, &c->devices, list) if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { - if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) && (type == 0x01)) /* only SCSI bus */ i2o_shost->channel[i++] = i2o_dev; @@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev) u8 type; struct i2o_device *d = i2o_shost->channel[0]; - if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) + if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1) && (type == 0x01)) /* SCSI bus */ - if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { + if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { channel = 0; if (i2o_dev->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE) - lun = i2o_shost->lun++; + lun = + cpu_to_le64(i2o_shost-> + lun++); else lun = 0; } @@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev) break; case I2O_CLASS_SCSI_PERIPHERAL: - if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) + if (i2o_parm_field_get(i2o_dev, 0x0000, 
3, &id, 4)) return -EFAULT; - if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) + if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8)) return -EFAULT; parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); @@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev) return -EFAULT; } - if (id >= scsi_host->max_id) { - osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, - scsi_host->max_id); + if (le32_to_cpu(id) >= scsi_host->max_id) { + osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", + le32_to_cpu(id), scsi_host->max_id); return -EFAULT; } - if (lun >= scsi_host->max_lun) { - osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", - (unsigned int)lun, scsi_host->max_lun); + if (le64_to_cpu(lun) >= scsi_host->max_lun) { + osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)", + (long unsigned int)le64_to_cpu(lun), + scsi_host->max_lun); return -EFAULT; } scsi_dev = - __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); + __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id), + le64_to_cpu(lun), i2o_dev); if (IS_ERR(scsi_dev)) { osm_warn("can not add SCSI device %03x\n", @@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev) sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi"); - osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", - i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); + osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n", + i2o_dev->lct_data.tid, channel, le32_to_cpu(id), + (long unsigned int)le64_to_cpu(lun)); return 0; }; @@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, struct i2o_controller *c; struct i2o_device *i2o_dev; int tid; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; /* * ENABLE_DISCONNECT * SIMPLE_TAG @@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, */ u32 scsi_flags = 0x20a00000; u32 sgl_offset; - u32 __iomem *mptr; + u32 *mptr; u32 cmd = I2O_CMD_SCSI_EXEC << 24; int rc = 0; @@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, * throw it back to the scsi layer */ - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) { + msg = i2o_msg_get(c); + if (IS_ERR(msg)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto exit; } @@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, if (sgl_offset == SGL_OFFSET_10) sgl_offset = SGL_OFFSET_12; cmd = I2O_CMD_PRIVATE << 24; - writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); - writel(adpt_flags | tid, mptr++); + *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); + *mptr++ = cpu_to_le32(adpt_flags | tid); } #endif - writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); - writel(i2o_scsi_driver.context, &msg->u.s.icntxt); + msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); + msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context); /* We want the SCSI control block back */ - writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); + msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt)); /* LSI_920_PCI_QUIRK * @@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, } */ - writel(scsi_flags | SCpnt->cmd_len, mptr++); + *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len); /* Write SCSI command into the message - always 16 byte block */ - memcpy_toio(mptr, SCpnt->cmnd, 16); + memcpy(mptr, SCpnt->cmnd, 16); mptr += 4; if (sgl_offset != SGL_OFFSET_0) { /* write size of data 
addressed by SGL */ - writel(SCpnt->request_bufflen, mptr++); + *mptr++ = cpu_to_le32(SCpnt->request_bufflen); /* Now fill in the SGList and command */ if (SCpnt->use_sg) { @@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, } /* Stick the headers on */ - writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, - &msg->u.head[0]); + msg->u.head[0] = + cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); /* Queue the message */ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); osm_debug("Issued %ld\n", SCpnt->serial_number); @@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, nomem: rc = -ENOMEM; - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); exit: return rc; @@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) { struct i2o_device *i2o_dev; struct i2o_controller *c; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int tid; int status = FAILED; @@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) c = i2o_dev->iop; tid = i2o_dev->lct_data.tid; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) return SCSI_MLQUEUE_HOST_BUSY; - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, - &msg->u.head[1]); - writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid); + msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt)); - if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) + if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT)) status = SUCCESS; return status; diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 4eb5325..4921674 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -32,7 +32,7 @@ #include "core.h" #define OSM_NAME "i2o" -#define OSM_VERSION "1.288" +#define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O subsystem" /* global I2O controller list */ @@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab; static int i2o_hrt_get(struct i2o_controller *c); /** - * i2o_msg_nop - Returns a message which is not used - * @c: I2O controller from which the message was created - * @m: message which should be returned - * - * If you fetch a message via i2o_msg_get, and can't use it, you must - * return the message with this function. Otherwise the message frame - * is lost. - */ -void i2o_msg_nop(struct i2o_controller *c, u32 m) -{ - struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); - - writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0, &msg->u.head[2]); - writel(0, &msg->u.head[3]); - i2o_msg_post(c, m); -}; - -/** * i2o_msg_get_wait - obtain an I2O message from the IOP * @c: I2O controller * @msg: pointer to a I2O message pointer @@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m) * address from the read port (see the i2o spec). If no message is * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. 
*/ -u32 i2o_msg_get_wait(struct i2o_controller *c, - struct i2o_message __iomem ** msg, int wait) +struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait) { unsigned long timeout = jiffies + wait * HZ; - u32 m; + struct i2o_message *msg; - while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { + while (IS_ERR(msg = i2o_msg_get(c))) { if (time_after(jiffies, timeout)) { osm_debug("%s: Timeout waiting for message frame.\n", c->name); - return I2O_QUEUE_EMPTY; + return ERR_PTR(-ETIMEDOUT); } schedule_timeout_uninterruptible(1); } - return m; + return msg; }; #if BITS_PER_LONG == 64 @@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) */ static int i2o_iop_quiesce(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; int rc; @@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c) (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) return 0; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | + ADAPTER_TID); /* Long timeout needed for quiesce if lots of devices */ - if ((rc = i2o_msg_post_wait(c, m, 240))) + if ((rc = i2o_msg_post_wait(c, msg, 240))) osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); else osm_debug("%s: Quiesced.\n", c->name); @@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c) */ static int i2o_iop_enable(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; int rc; @@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c) if (sb->iop_state != ADAPTER_STATE_READY) return -EINVAL; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | + ADAPTER_TID); /* How long of a timeout do we need? 
*/ - if ((rc = i2o_msg_post_wait(c, m, 240))) + if ((rc = i2o_msg_post_wait(c, msg, 240))) osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); else osm_debug("%s: Enabled.\n", c->name); @@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void) */ static int i2o_iop_clear(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; int rc; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); /* Quiesce all IOPs first */ i2o_iop_quiesce_all(); - writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | + ADAPTER_TID); - if ((rc = i2o_msg_post_wait(c, m, 30))) + if ((rc = i2o_msg_post_wait(c, msg, 30))) osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); else osm_debug("%s: Cleared.\n", c->name); @@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c) * Clear and (re)initialize IOP's outbound queue and post the message * frames to the IOP. * - * Returns 0 on success or a negative errno code on failure. + * Returns 0 on success or negative error code on failure. */ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) { - volatile u8 *status = c->status.virt; u32 m; - struct i2o_message __iomem *msg; + volatile u8 *status = c->status.virt; + struct i2o_message *msg; ulong timeout; int i; @@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) memset(c->status.virt, 0, 4); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; - - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0x00000000, &msg->u.s.tcntxt); - writel(PAGE_SIZE, &msg->body[0]); + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(PAGE_SIZE); /* Outbound msg frame size in words and Initcode */ - writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); - writel(0xd0000004, &msg->body[2]); - writel(i2o_dma_low(c->status.phys), &msg->body[3]); - writel(i2o_dma_high(c->status.phys), &msg->body[4]); + msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80); + msg->body[2] = cpu_to_le32(0xd0000004); + msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys)); + msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys)); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; while (*status <= I2O_CMD_IN_PROGRESS) { @@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) static int i2o_iop_reset(struct i2o_controller *c) { volatile u8 *status = c->status.virt; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; unsigned long timeout; i2o_status_block *sb = 
c->status_block.virt; int rc = 0; osm_debug("%s: Resetting controller\n", c->name); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); memset(c->status_block.virt, 0, 8); /* Quiesce all IOPs first */ i2o_iop_quiesce_all(); - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context - writel(0, &msg->body[0]); - writel(0, &msg->body[1]); - writel(i2o_dma_low(c->status.phys), &msg->body[2]); - writel(i2o_dma_high(c->status.phys), &msg->body[3]); + msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + msg->body[0] = cpu_to_le32(0x00000000); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys)); + msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys)); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); /* Wait for a reply */ timeout = jiffies + I2O_TIMEOUT_RESET * HZ; @@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c) osm_debug("%s: Reset in progress, waiting for reboot...\n", c->name); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); - while (m == I2O_QUEUE_EMPTY) { + while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) { if (time_after(jiffies, timeout)) { osm_err("%s: IOP reset timeout.\n", c->name); - rc = -ETIMEDOUT; + rc = PTR_ERR(msg); goto exit; } schedule_timeout_uninterruptible(1); - - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); } - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); /* from here all quiesce commands are safe */ c->no_quiesce = 0; @@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c) */ static int i2o_iop_systab_set(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; i2o_status_block *sb = c->status_block.virt; struct device *dev = &c->pdev->dev; struct resource *root; @@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c) } } - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, PCI_DMA_TODEVICE); if (!i2o_systab.phys) { - i2o_msg_nop(c, m); + i2o_msg_nop(c, msg); return -ENOMEM; } - writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); - writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); + msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | + ADAPTER_TID); /* * Provide three SGL-elements: * System table (SysTab), Private memory space declaration and * Private i/o space declaration - * - * FIXME: is this still true? - * Nasty one here. We can't use dma_alloc_coherent to send the - * same table to everyone. 
We have to go remap it for them all */ - writel(c->unit + 2, &msg->body[0]); - writel(0, &msg->body[1]); - writel(0x54000000 | i2o_systab.len, &msg->body[2]); - writel(i2o_systab.phys, &msg->body[3]); - writel(0x54000000 | sb->current_mem_size, &msg->body[4]); - writel(sb->current_mem_base, &msg->body[5]); - writel(0xd4000000 | sb->current_io_size, &msg->body[6]); - writel(sb->current_io_base, &msg->body[6]); + msg->body[0] = cpu_to_le32(c->unit + 2); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len); + msg->body[3] = cpu_to_le32(i2o_systab.phys); + msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size); + msg->body[5] = cpu_to_le32(sb->current_mem_base); + msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size); + msg->body[6] = cpu_to_le32(sb->current_io_base); - rc = i2o_msg_post_wait(c, m, 120); + rc = i2o_msg_post_wait(c, msg, 120); dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, PCI_DMA_TODEVICE); @@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c) else osm_debug("%s: SysTab set.\n", c->name); - i2o_status_get(c); // Entered READY state - return rc; } @@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c) * * Send the system table and enable the I2O controller. * - * Returns 0 on success or negativer error code on failure. + * Returns 0 on success or negative error code on failure. */ static int i2o_iop_online(struct i2o_controller *c) { @@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c) list_for_each_entry_safe(dev, tmp, &c->devices, list) i2o_device_remove(dev); - class_device_unregister(c->classdev); device_del(&c->device); /* Ask the IOP to switch to RESET state */ @@ -869,12 +838,11 @@ static int i2o_systab_build(void) i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * sizeof(struct i2o_sys_tbl_entry); - systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); + systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL); if (!systab) { osm_err("unable to allocate memory for System Table\n"); return -ENOMEM; } - memset(systab, 0, i2o_systab.len); systab->version = I2OVERSION; systab->change_ind = change_ind + 1; @@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c) */ int i2o_status_get(struct i2o_controller *c) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; volatile u8 *status_block; unsigned long timeout; status_block = (u8 *) c->status_block.virt; memset(c->status_block.virt, 0, sizeof(i2o_status_block)); - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(i2o_exec_driver.context, &msg->u.s.icntxt); - writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context - writel(0, &msg->body[0]); - writel(0, &msg->body[1]); - writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); - writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); - writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ + msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); + msg->u.s.tcntxt = cpu_to_le32(0x00000000); + 
msg->body[0] = cpu_to_le32(0x00000000); + msg->body[1] = cpu_to_le32(0x00000000); + msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys)); + msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys)); + msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */ - i2o_msg_post(c, m); + i2o_msg_post(c, msg); /* Wait for a reply */ timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; @@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c) * The HRT contains information about possible hidden devices but is * mostly useless to us. * - * Returns 0 on success or negativer error code on failure. + * Returns 0 on success or negative error code on failure. */ static int i2o_hrt_get(struct i2o_controller *c) { @@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c) struct device *dev = &c->pdev->dev; for (i = 0; i < I2O_HRT_GET_TRIES; i++) { - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); - writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, - &msg->u.head[1]); - writel(0xd0000000 | c->hrt.len, &msg->body[0]); - writel(c->hrt.phys, &msg->body[1]); + msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | + ADAPTER_TID); + msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len); + msg->body[1] = cpu_to_le32(c->hrt.phys); - rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); + rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt); if (rc < 0) { osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, @@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c) } /** - * i2o_iop_free - Free the i2o_controller struct - * @c: I2O controller to free - */ -void i2o_iop_free(struct i2o_controller *c) -{ - kfree(c); -}; - -/** * i2o_iop_release - release the memory for a I2O controller * @dev: I2O controller which should be released * @@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev) i2o_iop_free(c); }; -/* I2O controller class */ -static struct class *i2o_controller_class; - /** * i2o_iop_alloc - Allocate and initialize a i2o_controller struct * * Allocate the necessary memory for a i2o_controller struct and - * initialize the lists. + * initialize the lists and message mempool. * * Returns a pointer to the I2O controller or a negative error code on * failure. @@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void) { static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ struct i2o_controller *c; + char poolname[32]; - c = kmalloc(sizeof(*c), GFP_KERNEL); + c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) { osm_err("i2o: Insufficient memory to allocate a I2O controller." 
"\n"); return ERR_PTR(-ENOMEM); } - memset(c, 0, sizeof(*c)); + + c->unit = unit++; + sprintf(c->name, "iop%d", c->unit); + + snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); + if (i2o_pool_alloc + (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, + I2O_MSG_INPOOL_MIN)) { + kfree(c); + return ERR_PTR(-ENOMEM); + }; INIT_LIST_HEAD(&c->devices); spin_lock_init(&c->lock); init_MUTEX(&c->lct_lock); - c->unit = unit++; - sprintf(c->name, "iop%d", c->unit); device_initialize(&c->device); @@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c) goto iop_reset; } - c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0), - &c->device, "iop%d", c->unit); - if (IS_ERR(c->classdev)) { - osm_err("%s: could not add controller class\n", c->name); - goto device_del; - } - osm_info("%s: Activating I2O controller...\n", c->name); osm_info("%s: This may take a few minutes if there are many devices\n", c->name); if ((rc = i2o_iop_activate(c))) { osm_err("%s: could not activate controller\n", c->name); - goto class_del; + goto device_del; } osm_debug("%s: building sys table...\n", c->name); if ((rc = i2o_systab_build())) - goto class_del; + goto device_del; osm_debug("%s: online controller...\n", c->name); if ((rc = i2o_iop_online(c))) - goto class_del; + goto device_del; osm_debug("%s: getting LCT...\n", c->name); if ((rc = i2o_exec_lct_get(c))) - goto class_del; + goto device_del; list_add(&c->list, &i2o_controllers); @@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c) return 0; - class_del: - class_device_unregister(c->classdev); - device_del: device_del(&c->device); @@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c) * is waited for, or expected. If you do not want further notifications, * call the i2o_event_register again with a evt_mask of 0. * - * Returns 0 on success or -ETIMEDOUT if no message could be fetched for - * sending the request. + * Returns 0 on success or negative error code on failure. */ int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, int tcntxt, u32 evt_mask) { struct i2o_controller *c = dev->iop; - struct i2o_message __iomem *msg; - u32 m; + struct i2o_message *msg; - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); - if (m == I2O_QUEUE_EMPTY) - return -ETIMEDOUT; + msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); + if (IS_ERR(msg)) + return PTR_ERR(msg); - writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); - writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. 
- tid, &msg->u.head[1]); - writel(drv->context, &msg->u.s.icntxt); - writel(tcntxt, &msg->u.s.tcntxt); - writel(evt_mask, &msg->body[0]); + msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); + msg->u.head[1] = + cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev-> + lct_data.tid); + msg->u.s.icntxt = cpu_to_le32(drv->context); + msg->u.s.tcntxt = cpu_to_le32(tcntxt); + msg->body[0] = cpu_to_le32(evt_mask); - i2o_msg_post(c, m); + i2o_msg_post(c, msg); return 0; }; @@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void) printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); - i2o_controller_class = class_create(THIS_MODULE, "i2o_controller"); - if (IS_ERR(i2o_controller_class)) { - osm_err("can't register class i2o_controller\n"); - goto exit; - } - if ((rc = i2o_driver_init())) - goto class_exit; + goto exit; if ((rc = i2o_exec_init())) goto driver_exit; @@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void) driver_exit: i2o_driver_exit(); - class_exit: - class_destroy(i2o_controller_class); - exit: return rc; } @@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void) i2o_pci_exit(); i2o_exec_exit(); i2o_driver_exit(); - class_destroy(i2o_controller_class); }; module_init(i2o_iop_init); diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c index ee7075f..c5b656c 100644 --- a/drivers/message/i2o/pci.c +++ b/drivers/message/i2o/pci.c @@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, pci_name(pdev)); c->pdev = pdev; - c->device.parent = get_device(&pdev->dev); + c->device.parent = &pdev->dev; /* Cards that fall apart if you hit them with large I/O loads... */ if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { @@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, if ((rc = i2o_iop_add(c))) goto uninstall; - get_device(&c->device); - if (i960) pci_write_config_word(i960, 0x42, 0x03ff); @@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, i2o_pci_free(c); free_controller: - put_device(c->device.parent); i2o_iop_free(c); disable: @@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev) printk(KERN_INFO "%s: Controller removed.\n", c->name); - put_device(c->device.parent); put_device(&c->device); }; @@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void) { pci_unregister_driver(&i2o_pci_driver); }; + MODULE_DEVICE_TABLE(pci, i2o_pci_ids); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c782a63..fa39b94 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -6,7 +6,7 @@ menu "PHY device support" config PHYLIB tristate "PHY Device support and infrastructure" - depends on NET_ETHERNET && (BROKEN || !ARCH_S390) + depends on NET_ETHERNET && (BROKEN || !S390) help Ethernet controllers are usually attached to PHY devices. 
This option provides infrastructure for diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 1bd22cd..87ee327 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -98,7 +98,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" #include <linux/in.h> #include <linux/errno.h> #include <linux/delay.h> -#include <linux/lp.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -106,7 +105,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" #include <linux/skbuff.h> #include <linux/if_plip.h> #include <linux/workqueue.h> -#include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/parport.h> #include <linux/bitops.h> diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 725a141..b824156 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -77,7 +77,7 @@ config PARPORT_PC_SUPERIO config PARPORT_PC_PCMCIA tristate "Support for PCMCIA management for PC-style ports" - depends on PARPORT!=n && (PCMCIA!=n && PARPORT_PC=m && PARPORT_PC || PARPORT_PC=y && PCMCIA) + depends on PCMCIA && PARPORT_PC help Say Y here if you need PCMCIA support for your PC-style parallel ports. If unsure, say N. diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 075c7eb..9ee6732 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -144,9 +144,9 @@ again: add_dev (numdevs++, port, -1); /* Find out the legacy device's IEEE 1284 device ID. */ - deviceid = kmalloc (1000, GFP_KERNEL); + deviceid = kmalloc (1024, GFP_KERNEL); if (deviceid) { - if (parport_device_id (numdevs - 1, deviceid, 1000) > 2) + if (parport_device_id (numdevs - 1, deviceid, 1024) > 2) detected++; kfree (deviceid); @@ -252,7 +252,7 @@ struct pardevice *parport_open (int devnum, const char *name, selected = port->daisy; parport_release (dev); - if (selected != port->daisy) { + if (selected != daisy) { /* No corresponding device. */ parport_unregister_device (dev); return NULL; @@ -344,9 +344,9 @@ static int cpp_daisy (struct parport *port, int cmd) PARPORT_CONTROL_STROBE, PARPORT_CONTROL_STROBE); udelay (1); + s = parport_read_status (port); parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); udelay (1); - s = parport_read_status (port); parport_write_data (port, 0xff); udelay (2); return s; @@ -395,15 +395,15 @@ int parport_daisy_select (struct parport *port, int daisy, int mode) case IEEE1284_MODE_EPP: case IEEE1284_MODE_EPPSL: case IEEE1284_MODE_EPPSWE: - return (cpp_daisy (port, 0x20 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0x20 + daisy) & + PARPORT_STATUS_ERROR); // For these modes we should switch to ECP mode: case IEEE1284_MODE_ECP: case IEEE1284_MODE_ECPRLE: case IEEE1284_MODE_ECPSWE: - return (cpp_daisy (port, 0xd0 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0xd0 + daisy) & + PARPORT_STATUS_ERROR); // Nothing was told for BECP in Daisy chain specification. // May be it's wise to use ECP? 
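The parport_daisy_select() hunks above and below all invert the sense of the returned status test. cpp_daisy() hands back the raw status byte, and the old code returned the masked PARPORT_STATUS_ERROR bit itself; after the fix the function returns nonzero only when that bit is clear. A minimal sketch of the new convention (illustrative only, not part of the patch; daisy_select_acked() is a hypothetical helper name, and it assumes, as the inverted test implies, that a device acknowledges a CPP select by pulling the error/nFault status line low):

	/* Sketch, not from the patch: 's' is the status byte returned by
	 * cpp_daisy() for a select command.  Nonzero means the select was
	 * acknowledged, i.e. PARPORT_STATUS_ERROR (assumed here to track
	 * the nFault line) is clear. */
	static int daisy_select_acked(unsigned char s)
	{
		return !(s & PARPORT_STATUS_ERROR);
	}

Seen this way, the three per-mode branches differ only in the command byte (0x20, 0xd0 or 0xe0 plus the daisy address) passed to cpp_daisy().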
@@ -413,8 +413,8 @@ int parport_daisy_select (struct parport *port, int daisy, int mode) case IEEE1284_MODE_BYTE: case IEEE1284_MODE_COMPAT: default: - return (cpp_daisy (port, 0xe0 + daisy) & - PARPORT_STATUS_ERROR); + return !(cpp_daisy (port, 0xe0 + daisy) & + PARPORT_STATUS_ERROR); } } @@ -436,7 +436,7 @@ static int select_port (struct parport *port) static int assign_addrs (struct parport *port) { - unsigned char s, last_dev; + unsigned char s; unsigned char daisy; int thisdev = numdevs; int detected; @@ -472,10 +472,13 @@ static int assign_addrs (struct parport *port) } parport_write_data (port, 0x78); udelay (2); - last_dev = 0; /* We've just been speaking to a device, so we - know there must be at least _one_ out there. */ + s = parport_read_status (port); - for (daisy = 0; daisy < 4; daisy++) { + for (daisy = 0; + (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)) + == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT) + && daisy < 4; + ++daisy) { parport_write_data (port, daisy); udelay (2); parport_frob_control (port, @@ -485,14 +488,18 @@ static int assign_addrs (struct parport *port) parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); udelay (1); - if (last_dev) - /* No more devices. */ - break; + add_dev (numdevs++, port, daisy); - last_dev = !(parport_read_status (port) - & PARPORT_STATUS_BUSY); + /* See if this device thought it was the last in the + * chain. */ + if (!(s & PARPORT_STATUS_BUSY)) + break; - add_dev (numdevs++, port, daisy); + /* We are now seeing pass-through status: we see + last_dev from the next device, or, if last_dev does + not work, status lines from some non-daisy-chain + device. */ + s = parport_read_status (port); } parport_write_data (port, 0xff); udelay (2); @@ -501,11 +508,11 @@ static int assign_addrs (struct parport *port) detected); /* Ask the new devices to introduce themselves. */ - deviceid = kmalloc (1000, GFP_KERNEL); + deviceid = kmalloc (1024, GFP_KERNEL); if (!deviceid) return 0; for (daisy = 0; thisdev < numdevs; thisdev++, daisy++) - parport_device_id (thisdev, deviceid, 1000); + parport_device_id (thisdev, deviceid, 1024); kfree (deviceid); return detected; diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index ce1e2aa..d6c7765 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -165,17 +165,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port, /* Does the error line indicate end of data? */ if (((i & 1) == 0) && (parport_read_status(port) & PARPORT_STATUS_ERROR)) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DPRINTK (KERN_DEBUG - "%s: No more nibble data (%d bytes)\n", - port->name, i/2); - - /* Go to reverse idle phase. */ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + goto end_of_data; } /* Event 7: Set nAutoFd low. */ @@ -225,18 +215,25 @@ size_t parport_ieee1284_read_nibble (struct parport *port, byte = nibble; } - i /= 2; /* i is now in bytes */ - if (i == len) { /* Read the last nibble without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + DPRINTK (KERN_DEBUG + "%s: No more nibble data (%d bytes)\n", + port->name, i/2); + + /* Go to reverse idle phase.
*/ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } - return i; + return i/2; #endif /* IEEE1284 support */ } @@ -256,17 +253,7 @@ size_t parport_ieee1284_read_byte (struct parport *port, /* Data available? */ if (parport_read_status (port) & PARPORT_STATUS_ERROR) { - port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; - DPRINTK (KERN_DEBUG - "%s: No more byte data (%Zd bytes)\n", - port->name, count); - - /* Go to reverse idle phase. */ - parport_frob_control (port, - PARPORT_CONTROL_AUTOFD, - PARPORT_CONTROL_AUTOFD); - port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; - break; + goto end_of_data; } /* Event 14: Place data bus in high impedance state. */ @@ -318,11 +305,20 @@ size_t parport_ieee1284_read_byte (struct parport *port, if (count == len) { /* Read the last byte without checking data avail. */ - port = port->physport; - if (parport_read_status (port) & PARPORT_STATUS_ERROR) - port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; + if (parport_read_status (port) & PARPORT_STATUS_ERROR) { + end_of_data: + DPRINTK (KERN_DEBUG + "%s: No more byte data (%Zd bytes)\n", + port->name, count); + + /* Go to reverse idle phase. */ + parport_frob_control (port, + PARPORT_CONTROL_AUTOFD, + PARPORT_CONTROL_AUTOFD); + port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; + } else - port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; + port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } return count; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index c6493ad..18e85cc 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1169,7 +1169,7 @@ dump_parport_state ("fwd idle", port); /* GCC is not inlining extern inline function later overwriten to non-inline, so we use outlined_ variants here. 
*/ -static struct parport_operations parport_pc_ops = +static const struct parport_operations parport_pc_ops = { .write_data = parport_pc_write_data, .read_data = parport_pc_read_data, @@ -1211,10 +1211,11 @@ static struct parport_operations parport_pc_ops = static void __devinit show_parconfig_smsc37c669(int io, int key) { int cr1,cr4,cra,cr23,cr26,cr27,i=0; - static const char *modes[]={ "SPP and Bidirectional (PS/2)", - "EPP and SPP", - "ECP", - "ECP and EPP" }; + static const char *const modes[]={ + "SPP and Bidirectional (PS/2)", + "EPP and SPP", + "ECP", + "ECP and EPP" }; outb(key,io); outb(key,io); @@ -1288,7 +1289,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key) static void __devinit show_parconfig_winbond(int io, int key) { int cr30,cr60,cr61,cr70,cr74,crf0,i=0; - static const char *modes[] = { + static const char *const modes[] = { "Standard (SPP) and Bidirectional(PS/2)", /* 0 */ "EPP-1.9 and SPP", "ECP", @@ -1297,7 +1298,9 @@ static void __devinit show_parconfig_winbond(int io, int key) "EPP-1.7 and SPP", /* 5 */ "undefined!", "ECP and EPP-1.7" }; - static char *irqtypes[] = { "pulsed low, high-Z", "follows nACK" }; + static char *const irqtypes[] = { + "pulsed low, high-Z", + "follows nACK" }; /* The registers are called compatible-PnP because the register layout is modelled after ISA-PnP, the access @@ -2396,7 +2399,8 @@ EXPORT_SYMBOL (parport_pc_unregister_port); /* ITE support maintained by Rich Liu <richliu@poorman.org> */ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq, - int autodma, struct parport_pc_via_data *via) + int autodma, + const struct parport_pc_via_data *via) { short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; struct resource *base_res; @@ -2524,7 +2528,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = { }; static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq, - int autodma, struct parport_pc_via_data *via) + int autodma, + const struct parport_pc_via_data *via) { u8 tmp, tmp2, siofunc; u8 ppcontrol = 0; @@ -2694,8 +2699,9 @@ enum parport_pc_sio_types { /* each element directly indexed from enum list, above */ static struct parport_pc_superio { - int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, struct parport_pc_via_data *via); - struct parport_pc_via_data *via; + int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, + const struct parport_pc_via_data *via); + const struct parport_pc_via_data *via; } parport_pc_superio_info[] __devinitdata = { { sio_via_probe, &via_686a_data, }, { sio_via_probe, &via_8231_data, }, @@ -2828,7 +2834,7 @@ static struct parport_pc_pci { /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */ }; -static struct pci_device_id parport_pc_pci_tbl[] = { +static const struct pci_device_id parport_pc_pci_tbl[] = { /* Super-IO onboard chips */ { 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a }, { 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 }, diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index 4b48b31..b62aee8 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -11,9 +11,9 @@ #include <linux/string.h> #include <asm/uaccess.h> -static struct { - char *token; - char *descr; +static const struct { + const char *token; + const char *descr; } classes[] = { { "", "Legacy device" }, { "PRINTER", "Printer" }, @@ -128,8 +128,131 @@ static void parse_data(struct parport *port, int device, char *str) kfree(txt); } +/* Read up to count-1 bytes of device id. 
Terminate buffer with + * '\0'. Buffer begins with two Device ID length bytes as given by + * the device. */ +static ssize_t parport_read_device_id (struct parport *port, char *buffer, + size_t count) +{ + unsigned char length[2]; + unsigned lelen, belen; + size_t idlens[4]; + unsigned numidlens; + unsigned current_idlen; + ssize_t retval; + size_t len; + + /* First two bytes are MSB,LSB of inclusive length. */ + retval = parport_read (port, length, 2); + + if (retval < 0) + return retval; + if (retval != 2) + return -EIO; + + if (count < 2) + return 0; + memcpy(buffer, length, 2); + len = 2; + + /* Some devices wrongly send LE length, and some send it two + * bytes short. Construct a sorted array of lengths to try. */ + belen = (length[0] << 8) + length[1]; + lelen = (length[1] << 8) + length[0]; + idlens[0] = min(belen, lelen); + idlens[1] = idlens[0]+2; + if (belen != lelen) { + int off = 2; + /* Don't try lengths of 0x100 and 0x200 as 1 and 2 */ + if (idlens[0] <= 2) + off = 0; + idlens[off] = max(belen, lelen); + idlens[off+1] = idlens[off]+2; + numidlens = off+2; + } + else { + /* Some devices don't truly implement Device ID, but + * just return a constant nibble forever. This also + * catches those cases. */ + if (idlens[0] == 0 || idlens[0] > 0xFFF) { + printk (KERN_DEBUG "%s: reported broken Device ID" + " length of %#zX bytes\n", + port->name, idlens[0]); + return -EIO; + } + numidlens = 2; + } + + /* Try to respect the given ID length despite all the bugs in + * the ID length. Read according to shortest possible ID + * first. */ + for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) { + size_t idlen = idlens[current_idlen]; + if (idlen+1 >= count) + break; + + retval = parport_read (port, buffer+len, idlen-len); + + if (retval < 0) + return retval; + len += retval; + + if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) { + if (belen != len) { + printk (KERN_DEBUG "%s: Device ID was %d bytes" + " while device told it would be %d" + " bytes\n", + port->name, len, belen); + } + goto done; + } + + /* This might end reading the Device ID too + * soon. Hopefully the needed fields were already in + * the first 256 or so bytes that we must have read so + * far. */ + if (buffer[len-1] == ';') { + printk (KERN_DEBUG "%s: Device ID reading stopped" + " before device told data not available. " + "Current idlen %d of %d, len bytes %02X %02X\n", + port->name, current_idlen, numidlens, + length[0], length[1]); + goto done; + } + } + if (current_idlen < numidlens) { + /* Buffer not large enough, read to end of buffer. */ + size_t idlen, len2; + if (len+1 < count) { + retval = parport_read (port, buffer+len, count-len-1); + if (retval < 0) + return retval; + len += retval; + } + /* Read the whole ID, since some devices would not + * otherwise give back the Device ID from the beginning + * the next time they are asked. */ + idlen = idlens[current_idlen]; + len2 = len; + while(len2 < idlen && retval > 0) { + char tmp[4]; + retval = parport_read (port, tmp, + min(sizeof tmp, idlen-len2)); + if (retval < 0) + return retval; + len2 += retval; + } + } + /* In addition, there are broken devices out there that don't + even finish off with a semi-colon. We do not need to care + about those at this time. */ + done: + buffer[len] = '\0'; + return len; +} + /* Get Std 1284 Device ID.
*/ -ssize_t parport_device_id (int devnum, char *buffer, size_t len) +ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; struct pardevice *dev = parport_open (devnum, "Device ID probe", @@ -139,76 +262,20 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len) parport_claim_or_block (dev); - /* Negotiate to compatibility mode, and then to device ID mode. - * (This is in case we are already in device ID mode.) */ + /* Negotiate to compatibility mode, and then to device ID + * mode. (This is so that we start from the beginning of the + * device ID if we are already in device ID mode.) */ parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); retval = parport_negotiate (dev->port, IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID); if (!retval) { - int idlen; - unsigned char length[2]; - - /* First two bytes are MSB,LSB of inclusive length. */ - retval = parport_read (dev->port, length, 2); - - if (retval != 2) goto end_id; - - idlen = (length[0] << 8) + length[1] - 2; - /* - * Check if the caller-allocated buffer is large enough - * otherwise bail out or there will be an at least off by one. - */ - if (idlen + 1 < len) - len = idlen; - else { - retval = -EINVAL; - goto out; - } - retval = parport_read (dev->port, buffer, len); - - if (retval != len) - printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n", - dev->port->name, retval, - len); - - /* Some printer manufacturers mistakenly believe that - the length field is supposed to be _exclusive_. - In addition, there are broken devices out there - that don't even finish off with a semi-colon. */ - if (buffer[len - 1] != ';') { - ssize_t diff; - diff = parport_read (dev->port, buffer + len, 2); - retval += diff; - - if (diff) - printk (KERN_DEBUG - "%s: device reported incorrect " - "length field (%d, should be %Zd)\n", - dev->port->name, idlen, retval); - else { - /* One semi-colon short of a device ID. */ - buffer[len++] = ';'; - printk (KERN_DEBUG "%s: faking semi-colon\n", - dev->port->name); - - /* If we get here, I don't think we - need to worry about the possible - standard violation of having read - more than we were told to. The - device is non-compliant anyhow.
*/ - } - } - - end_id: - buffer[len] = '\0'; + retval = parport_read_device_id (dev->port, buffer, count); parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); + if (retval > 2) + parse_data (dev->port, dev->daisy, buffer+2); } - if (retval > 2) - parse_data (dev->port, dev->daisy, buffer); - -out: parport_release (dev); parport_close (dev); return retval; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 9cb3ab1..ea62bed 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -1002,6 +1002,7 @@ EXPORT_SYMBOL(parport_register_driver); EXPORT_SYMBOL(parport_unregister_driver); EXPORT_SYMBOL(parport_register_device); EXPORT_SYMBOL(parport_unregister_device); +EXPORT_SYMBOL(parport_get_port); EXPORT_SYMBOL(parport_put_port); EXPORT_SYMBOL(parport_find_number); EXPORT_SYMBOL(parport_find_base); diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 6b7583f..a1f0b0b 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -31,15 +31,6 @@ static struct { } pnp_bios_callpoint; -/* The PnP BIOS entries in the GDT */ -#define PNP_GDT (GDT_ENTRY_PNPBIOS_BASE * 8) - -#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */ -#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */ -#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */ -#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */ -#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */ - /* * These are some opcodes for a "static asmlinkage" * As this code is *not* executed inside the linux kernel segment, but in a @@ -67,16 +58,11 @@ __asm__( ".previous \n" ); -#define Q_SET_SEL(cpu, selname, address, size) \ -do { \ -set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \ -set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \ -} while(0) - #define Q2_SET_SEL(cpu, selname, address, size) \ do { \ -set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \ -set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \ +struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \ +set_base(gdt[(selname) >> 3], (u32)(address)); \ +set_limit(gdt[(selname) >> 3], size); \ } while(0) static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; @@ -115,8 +101,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, return PNP_FUNCTION_NOT_SUPPORTED; cpu = get_cpu(); - save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8]; - per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc; + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; /* On some boxes IRQ's during PnP BIOS calls are deadly. */ spin_lock_irqsave(&pnp_bios_lock, flags); @@ -158,7 +144,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, ); spin_unlock_irqrestore(&pnp_bios_lock, flags); - per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40; + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; put_cpu(); /* If we get here and this is set then the PnP BIOS faulted on us. */ @@ -290,12 +276,15 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data) static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) { u16 status; + u16 tmp_nodenum; if (!pnp_bios_present()) return PNP_FUNCTION_NOT_SUPPORTED; if ( !boot && pnpbios_dont_use_current_config ) return PNP_FUNCTION_NOT_SUPPORTED; + tmp_nodenum = *nodenum; status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 
2 : 1, PNP_DS, 0, - nodenum, sizeof(char), data, 65536); + &tmp_nodenum, sizeof(tmp_nodenum), data, 65536); + *nodenum = tmp_nodenum; return status; } @@ -535,10 +524,12 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); - for(i=0; i < NR_CPUS; i++) - { - Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024); - Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024); - Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024); - } + for (i = 0; i < NR_CPUS; i++) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) + continue; + set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc); + set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg)); + set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg)); + } } diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index c99a2fe..9803c93 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 specific device drivers # -obj-y += s390mach.o sysinfo.o +obj-y += s390mach.o sysinfo.o s390_rdev.o obj-y += cio/ block/ char/ crypto/ net/ scsi/ drivers-y += drivers/s390/built-in.o diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 6e7d7b0..6f50cc9 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -1,11 +1,11 @@ -if ARCH_S390 +if S390 comment "S/390 block device drivers" - depends on ARCH_S390 + depends on S390 config BLK_DEV_XPRAM tristate "XPRAM disk support" - depends on ARCH_S390 + depends on S390 help Select this option if you want to use your expanded storage on S/390 or zSeries as a disk. This is useful as a _fast_ swap device if you @@ -49,7 +49,7 @@ config DASD_FBA config DASD_DIAG tristate "Support for DIAG access to Disks" - depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL) + depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL) help Select this option if you want to use Diagnose250 command to access Disks under VM. If you are not running under VM or unsure what it is, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index fdb6138..f779f67 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -7,7 +7,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * - * $Revision: 1.167 $ + * $Revision: 1.172 $ */ #include <linux/config.h> @@ -604,7 +604,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize, void dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) { -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT struct ccw1 *ccw; /* Clear any idals used for the request. */ @@ -1224,6 +1224,12 @@ __dasd_start_head(struct dasd_device * device) if (list_empty(&device->ccw_queue)) return; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); + /* check FAILFAST */ + if (device->stopped & ~DASD_STOPPED_PENDING && + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + dasd_schedule_bh(device); + } if ((cqr->status == DASD_CQR_QUEUED) && (!device->stopped)) { /* try to start the first I/O that can be started */ @@ -1323,7 +1329,7 @@ void dasd_schedule_bh(struct dasd_device * device) { /* Protect against rescheduling. 
*/ - if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled)) + if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) return; dasd_get_device(device); tasklet_hi_schedule(&device->tasklet); @@ -1750,8 +1756,10 @@ dasd_exit(void) * SECTION: common functions for ccw_driver use */ -/* initial attempt at a probe function. this can be simplified once - * the other detection code is gone */ +/* + * Initial attempt at a probe function. this can be simplified once + * the other detection code is gone. + */ int dasd_generic_probe (struct ccw_device *cdev, struct dasd_discipline *discipline) @@ -1770,8 +1778,10 @@ dasd_generic_probe (struct ccw_device *cdev, return ret; } -/* this will one day be called from a global not_oper handler. - * It is also used by driver_unregister during module unload */ +/* + * This will one day be called from a global not_oper handler. + * It is also used by driver_unregister during module unload. + */ void dasd_generic_remove (struct ccw_device *cdev) { @@ -1798,9 +1808,11 @@ dasd_generic_remove (struct ccw_device *cdev) dasd_delete_device(device); } -/* activate a device. This is called from dasd_{eckd,fba}_probe() when either +/* + * Activate a device. This is called from dasd_{eckd,fba}_probe() when either * the device is detected for the first time and is supposed to be used - * or the user has started activation through sysfs */ + * or the user has started activation through sysfs. + */ int dasd_generic_set_online (struct ccw_device *cdev, struct dasd_discipline *discipline) @@ -1917,7 +1929,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event) if (cqr->status == DASD_CQR_IN_IO) cqr->status = DASD_CQR_FAILED; device->stopped |= DASD_STOPPED_DC_EIO; - dasd_schedule_bh(device); } else { list_for_each_entry(cqr, &device->ccw_queue, list) if (cqr->status == DASD_CQR_IN_IO) { @@ -1927,6 +1938,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event) device->stopped |= DASD_STOPPED_DC_WAIT; dasd_set_timer(device, 0); } + dasd_schedule_bh(device); ret = 1; break; case CIO_OPER: diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index ab8754e..ba80fde 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -6,7 +6,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.51 $ + * $Revision: 1.53 $ */ #include <linux/config.h> @@ -25,6 +25,7 @@ #include <asm/io.h> #include <asm/s390_ext.h> #include <asm/todclk.h> +#include <asm/vtoc.h> #include "dasd_int.h" #include "dasd_diag.h" @@ -74,7 +75,7 @@ dia250(void *iob, int cmd) int rc; __asm__ __volatile__( -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT " lghi %0,3\n" " lgr 0,%3\n" " diag 0,%2,0x250\n" @@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device) struct dasd_diag_private *private; struct dasd_diag_characteristics *rdc_data; struct dasd_diag_bio bio; - struct dasd_diag_cms_label *label; + struct vtoc_cms_label *label; blocknum_t end_block; unsigned int sb, bsize; int rc; @@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device) mdsk_term_io(device); /* figure out blocksize of device */ - label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL); + label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); if (label == NULL) { DEV_MESSAGE(KERN_WARNING, device, "%s", "No memory to allocate initialization request"); @@ -548,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) } cqr->retries = 
DIAG_MAX_RETRIES; cqr->buildclk = get_clock(); + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = DIAG_TIMEOUT; cqr->status = DASD_CQR_FILLED; diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index df31484..a4f80bd 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h @@ -6,7 +6,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.8 $ + * $Revision: 1.9 $ */ #define MDSK_WRITE_REQ 0x01 @@ -44,29 +44,8 @@ struct dasd_diag_characteristics { u8 rdev_features; } __attribute__ ((packed, aligned(4))); -struct dasd_diag_cms_label { - u8 label_id[4]; - u8 vol_id[6]; - u16 version_id; - u32 block_size; - u32 origin_ptr; - u32 usable_count; - u32 formatted_count; - u32 block_count; - u32 used_count; - u32 fst_size; - u32 fst_count; - u8 format_date[6]; - u8 reserved1[2]; - u32 disk_offset; - u32 map_block; - u32 hblk_disp; - u32 user_disp; - u8 reserved2[4]; - u8 segment_name[8]; -} __attribute__ ((packed)); - -#ifdef CONFIG_ARCH_S390X + +#ifdef CONFIG_64BIT #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT typedef u64 blocknum_t; @@ -107,7 +86,7 @@ struct dasd_diag_rw_io { struct dasd_diag_bio *bio_list; u8 spare4[8]; } __attribute__ ((packed, aligned(8))); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define DASD_DIAG_FLAGA_DEFAULT 0x0 typedef u32 blocknum_t; @@ -146,4 +125,4 @@ struct dasd_diag_rw_io { u32 interrupt_params; u8 spare3[20]; } __attribute__ ((packed, aligned(8))); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 811060e..96eb482 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -7,7 +7,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.71 $ + * $Revision: 1.74 $ */ #include <linux/config.h> @@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) /* Eckd can only do full blocks. 
*/ return ERR_PTR(-EINVAL); count += bv->bv_len >> (device->s2b_shift + 9); -#if defined(CONFIG_ARCH_S390X) +#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len >> (device->s2b_shift + 9); @@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) recid++; } } + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->lpm = private->path_data.ppm; @@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); @@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); @@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 0; cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 28cb461..8ec75dc 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -4,7 +4,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.40 $ + * $Revision: 1.41 $ */ #include <linux/config.h> @@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv->bv_len >> (device->s2b_shift + 9); -#if defined(CONFIG_ARCH_S390X) +#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len / blksize; @@ -352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) recid++; } } + if (req->flags & REQ_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->device = device; cqr->expires = 5 * 60 * HZ; /* 5 minutes */ cqr->retries = 32; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 9fab04f..2fb05c4 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -6,7 +6,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * - * $Revision: 1.65 $ + * $Revision: 1.68 $ */ #ifndef DASD_INT_H @@ -208,6 +208,7 @@ struct dasd_ccw_req { /* per dasd_ccw_req flags */ #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ +#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ /* Signature for error recovery functions. */ typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 789595b..044b753 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -7,7 +7,7 @@ * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 * - * $Revision: 1.47 $ + * $Revision: 1.50 $ * * i/o controls for the dasd driver. 
*/ @@ -352,6 +352,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) if (device == NULL) return -ENODEV; + if (dasd_profile_level == DASD_PROFILE_OFF) + return -EIO; + if (copy_to_user((long __user *) args, (long *) &device->profile, sizeof (struct dasd_profile_info_t))) return -EFAULT; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 4fde411..2e727f4 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -15,7 +15,7 @@ #include <asm/io.h> #include <linux/completion.h> #include <linux/interrupt.h> -#include <asm/ccwdev.h> // for s390_root_dev_(un)register() +#include <asm/s390_rdev.h> //#define DCSSBLK_DEBUG /* Debug messages on/off */ #define DCSSBLK_NAME "dcssblk" diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index d428c90..bf3a67c 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,1b\n" @@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,1b\n" diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 5a6cef2..80f7f31 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -204,7 +204,7 @@ cpi_module_init(void) printk(KERN_WARNING "cpi: no control program identification " "support\n"); sclp_unregister(&sclp_cpi_event); - return -ENOTSUPP; + return -EOPNOTSUPP; } req = cpi_prepare_req(); diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 83f7577..56fa691 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused) psw_t quiesce_psw; int cpu; - if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) + if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) signal_processor(smp_processor_id(), sigp_stop); /* Wait for all other cpus to enter stopped state */ for_each_online_cpu(cpu) { diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 559d514..5ced272 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -65,7 +65,7 @@ static void tapeblock_trigger_requeue(struct tape_device *device) { /* Protect against rescheduling. 
*/ - if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled)) + if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0) return; schedule_work(&device->blk_data.requeue_task); } diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 5473c23..5acc0ac 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout, __cmdl = len; err = 0; asm volatile ( -#ifdef __s390x__ +#ifdef CONFIG_64BIT "diag %2,%4,0x288\n" "1: \n" ".section .fixup,\"ax\"\n" diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index a1c52a6..daf21e0 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/blacklist.c * S/390 common I/O routines -- blacklisting of specific devices - * $Revision: 1.35 $ + * $Revision: 1.39 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -15,6 +15,7 @@ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/device.h> @@ -34,10 +35,10 @@ * These can be single devices or ranges of devices */ -/* 65536 bits to indicate if a devno is blacklisted or not */ -#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \ +/* 65536 bits for each set to indicate if a devno is blacklisted or not */ +#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) -static unsigned long bl_dev[__BL_DEV_WORDS]; +static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS]; typedef enum {add, free} range_action; /* @@ -45,21 +46,23 @@ typedef enum {add, free} range_action; * (Un-)blacklist the devices from-to */ static inline void -blacklist_range (range_action action, unsigned int from, unsigned int to) +blacklist_range (range_action action, unsigned int from, unsigned int to, + unsigned int ssid) { if (!to) to = from; - if (from > to || to > __MAX_SUBCHANNELS) { + if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) { printk (KERN_WARNING "Invalid blacklist range " - "0x%04x to 0x%04x, skipping\n", from, to); + "0.%x.%04x to 0.%x.%04x, skipping\n", + ssid, from, ssid, to); return; } for (; from <= to; from++) { if (action == add) - set_bit (from, bl_dev); + set_bit (from, bl_dev[ssid]); else - clear_bit (from, bl_dev); + clear_bit (from, bl_dev[ssid]); } } @@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to) * Shamelessly grabbed from dasd_devmap.c. 
*/ static inline int -blacklist_busid(char **str, int *id0, int *id1, int *devno) +blacklist_busid(char **str, int *id0, int *ssid, int *devno) { int val, old_style; char *sav; @@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno) goto confused; val = simple_strtoul(*str, str, 16); if (old_style || (*str)[0] != '.') { - *id0 = *id1 = 0; + *id0 = *ssid = 0; if (val < 0 || val > 0xffff) goto confused; *devno = val; @@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno) val = simple_strtoul(*str, str, 16); if (val < 0 || val > 0xff || (*str)++[0] != '.') goto confused; - *id1 = val; + *ssid = val; if (!isxdigit((*str)[0])) /* We require at least one hex digit */ goto confused; val = simple_strtoul(*str, str, 16); @@ -125,7 +128,7 @@ confused: static inline int blacklist_parse_parameters (char *str, range_action action) { - unsigned int from, to, from_id0, to_id0, from_id1, to_id1; + unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; while (*str != 0 && *str != '\n') { range_action ra = action; @@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action) */ if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { - from = 0; - to = __MAX_SUBCHANNELS; + int j; + str += 3; + for (j=0; j <= __MAX_SSID; j++) + blacklist_range(ra, 0, __MAX_SUBCHANNEL, j); } else { int rc; rc = blacklist_busid(&str, &from_id0, - &from_id1, &from); + &from_ssid, &from); if (rc) continue; to = from; to_id0 = from_id0; - to_id1 = from_id1; + to_ssid = from_ssid; if (*str == '-') { str++; rc = blacklist_busid(&str, &to_id0, - &to_id1, &to); + &to_ssid, &to); if (rc) continue; } @@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action) strsep(&str, ",\n")); continue; } - if ((from_id0 != to_id0) || (from_id1 != to_id1)) { + if ((from_id0 != to_id0) || + (from_ssid != to_ssid)) { printk(KERN_WARNING "invalid cio_ignore range " "%x.%x.%04x-%x.%x.%04x\n", - from_id0, from_id1, from, - to_id0, to_id1, to); + from_id0, from_ssid, from, + to_id0, to_ssid, to); continue; } + pr_debug("blacklist_setup: adding range " + "from %x.%x.%04x to %x.%x.%04x\n", + from_id0, from_ssid, from, to_id0, to_ssid, to); + blacklist_range (ra, from, to, to_ssid); } - /* FIXME: ignoring id0 and id1 here. */ - pr_debug("blacklist_setup: adding range " - "from 0.0.%04x to 0.0.%04x\n", from, to); - blacklist_range (ra, from, to); } return 1; } @@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup); * Used by validate_subchannel() */ int -is_blacklisted (int devno) +is_blacklisted (int ssid, int devno) { - return test_bit (devno, bl_dev); + return test_bit (devno, bl_dev[ssid]); } #ifdef CONFIG_PROC_FS +static int +__s390_redo_validation(struct subchannel_id schid, void *data) +{ + int ret; + struct subchannel *sch; + + sch = get_subchannel_by_schid(schid); + if (sch) { + /* Already known. */ + put_device(&sch->dev); + return 0; + } + ret = css_probe_device(schid); + if (ret == -ENXIO) + return ret; /* We're through. */ + if (ret == -ENOMEM) + /* Stop validation for now. Bad, but no need for a panic. 
*/ + return ret; + return 0; +} + /* * Function: s390_redo_validation * Look for no longer blacklisted devices @@ -226,29 +253,9 @@ is_blacklisted (int devno) static inline void s390_redo_validation (void) { - unsigned int irq; - CIO_TRACE_EVENT (0, "redoval"); - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int ret; - struct subchannel *sch; - - sch = get_subchannel_by_schid(irq); - if (sch) { - /* Already known. */ - put_device(&sch->dev); - continue; - } - ret = css_probe_device(irq); - if (ret == -ENXIO) - break; /* We're through. */ - if (ret == -ENOMEM) - /* - * Stop validation for now. Bad, but no need for a - * panic. - */ - break; - } + + for_each_subchannel(__s390_redo_validation, NULL); } /* @@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf) s390_redo_validation (); } -/* FIXME: These should be real bus ids and not home-grown ones! */ -static int cio_ignore_read (char *page, char **start, off_t off, - int count, int *eof, void *data) +/* Iterator struct for all devices. */ +struct ccwdev_iter { + int devno; + int ssid; + int in_range; +}; + +static void * +cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) { - const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ - long devno; - int len; - - len = 0; - for (devno = off; /* abuse the page variable - * as counter, see fs/proc/generic.c */ - devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) { - if (!test_bit(devno, bl_dev)) - continue; - len += sprintf(page + len, "0.0.%04lx", devno); - if (test_bit(devno + 1, bl_dev)) { /* print range */ - while (++devno < __MAX_SUBCHANNELS) - if (!test_bit(devno, bl_dev)) - break; - len += sprintf(page + len, "-0.0.%04lx", --devno); - } - len += sprintf(page + len, "\n"); - } + struct ccwdev_iter *iter; + + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + return NULL; + iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); + if (!iter) + return ERR_PTR(-ENOMEM); + iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); + iter->devno = *offset % (__MAX_SUBCHANNEL + 1); + return iter; +} + +static void +cio_ignore_proc_seq_stop(struct seq_file *s, void *it) +{ + if (!IS_ERR(it)) + kfree(it); +} + +static void * +cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct ccwdev_iter *iter; + + if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + return NULL; + iter = it; + if (iter->devno == __MAX_SUBCHANNEL) { + iter->devno = 0; + iter->ssid++; + if (iter->ssid > __MAX_SSID) + return NULL; + } else + iter->devno++; + (*offset)++; + return iter; +} - if (devno < __MAX_SUBCHANNELS) - *eof = 1; - *start = (char *) (devno - off); /* number of checked entries */ - return len; +static int +cio_ignore_proc_seq_show(struct seq_file *s, void *it) +{ + struct ccwdev_iter *iter; + + iter = it; + if (!is_blacklisted(iter->ssid, iter->devno)) + /* Not blacklisted, nothing to output. */ + return 0; + if (!iter->in_range) { + /* First device in range. */ + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->ssid, iter->devno + 1)) + /* Singular device. */ + return seq_printf(s, "0.%x.%04x\n", + iter->ssid, iter->devno); + iter->in_range = 1; + return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno); + } + if ((iter->devno == __MAX_SUBCHANNEL) || + !is_blacklisted(iter->ssid, iter->devno + 1)) { + /* Last device in range. 
*/ + iter->in_range = 0; + return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); + } + return 0; } -static int cio_ignore_write(struct file *file, const char __user *user_buf, - unsigned long user_len, void *data) +static ssize_t +cio_ignore_write(struct file *file, const char __user *user_buf, + size_t user_len, loff_t *offset) { char *buf; + if (*offset) + return -EINVAL; if (user_len > 65536) user_len = 65536; buf = vmalloc (user_len + 1); /* maybe better use the stack? */ @@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf, return user_len; } +static struct seq_operations cio_ignore_proc_seq_ops = { + .start = cio_ignore_proc_seq_start, + .stop = cio_ignore_proc_seq_stop, + .next = cio_ignore_proc_seq_next, + .show = cio_ignore_proc_seq_show, +}; + +static int +cio_ignore_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cio_ignore_proc_seq_ops); +} + +static struct file_operations cio_ignore_proc_fops = { + .open = cio_ignore_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = cio_ignore_write, +}; + static int cio_ignore_proc_init (void) { @@ -340,8 +417,7 @@ cio_ignore_proc_init (void) if (!entry) return 0; - entry->read_proc = cio_ignore_read; - entry->write_proc = cio_ignore_write; + entry->proc_fops = &cio_ignore_proc_fops; return 1; } diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h index fb42caf..95e25c1 100644 --- a/drivers/s390/cio/blacklist.h +++ b/drivers/s390/cio/blacklist.h @@ -1,6 +1,6 @@ #ifndef S390_BLACKLIST_H #define S390_BLACKLIST_H -extern int is_blacklisted (int devno); +extern int is_blacklisted (int ssid, int devno); #endif diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index be9d2d6..e849289 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/ccwgroup.c * bus driver for ccwgroup - * $Revision: 1.32 $ + * $Revision: 1.33 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev) struct ccwgroup_driver *gdrv; int ret; - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_ONLINE) { ret = 0; @@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev) struct ccwgroup_driver *gdrv; int ret; - if (atomic_compare_and_swap(0, 1, &gdev->onoff)) + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_OFFLINE) { ret = 0; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index fa3c23b..7270808 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/chsc.c * S/390 common I/O routines -- channel subsystem call - * $Revision: 1.120 $ + * $Revision: 1.126 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -24,8 +24,6 @@ #include "ioasm.h" #include "chsc.h" -static struct channel_path *chps[NR_CHPIDS]; - static void *sei_page; static int new_channel_path(int chpid); @@ -33,13 +31,13 @@ static int new_channel_path(int chpid); static inline void set_chp_logically_online(int chp, int onoff) { - chps[chp]->state = onoff; + css[0]->chps[chp]->state = onoff; } static int get_chp_status(int chp) { - return (chps[chp] ? chps[chp]->state : -ENODEV); + return (css[0]->chps[chp] ? 
css[0]->chps[chp]->state : -ENODEV); } void @@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) struct { struct chsc_header request; - u16 reserved1; + u16 reserved1a:10; + u16 ssid:2; + u16 reserved1b:4; u16 f_sch; /* first subchannel */ u16 reserved2; u16 l_sch; /* last subchannel */ @@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) .code = 0x0004, }; - ssd_area->f_sch = sch->irq; - ssd_area->l_sch = sch->irq; + ssd_area->ssid = sch->schid.ssid; + ssd_area->f_sch = sch->schid.sch_no; + ssd_area->l_sch = sch->schid.sch_no; ccode = chsc(ssd_area); if (ccode > 0) { @@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) */ if (ssd_area->st > 3) { /* uhm, that looks strange... */ CIO_CRW_EVENT(0, "Strange subchannel type %d" - " for sch %04x\n", ssd_area->st, sch->irq); + " for sch 0.%x.%04x\n", ssd_area->st, + sch->schid.ssid, sch->schid.sch_no); /* * There may have been a new subchannel type defined in the * time since this code was written; since we don't know which @@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) return 0; } else { const char *type[4] = {"I/O", "chsc", "message", "ADM"}; - CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", - sch->irq, type[ssd_area->st]); + CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", + sch->schid.ssid, sch->schid.sch_no, + type[ssd_area->st]); sch->ssd_info.valid = 1; sch->ssd_info.type = ssd_area->st; @@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) int j; int mask; struct subchannel *sch; - __u8 *chpid; + struct channel_path *chpid; struct schib schib; sch = to_subchannel(dev); chpid = data; for (j = 0; j < 8; j++) - if (sch->schib.pmcw.chpid[j] == *chpid) + if (sch->schib.pmcw.chpid[j] == chpid->id) break; if (j >= 8) return 0; @@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) mask = 0x80 >> j; spin_lock(&sch->lock); - stsch(sch->irq, &schib); + stsch(sch->schid, &schib); if (!schib.pmcw.dnv) goto out_unreg; memcpy(&sch->schib, &schib, sizeof(struct schib)); @@ -284,7 +287,7 @@ out_unlock: out_unreg: spin_unlock(&sch->lock); sch->lpm = 0; - if (css_enqueue_subchannel_slow(sch->irq)) { + if (css_enqueue_subchannel_slow(sch->schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -295,23 +298,30 @@ static inline void s390_set_chpid_offline( __u8 chpid) { char dbf_txt[15]; + struct device *dev; sprintf(dbf_txt, "chpr%x", chpid); CIO_TRACE_EVENT(2, dbf_txt); if (get_chp_status(chpid) <= 0) return; - - bus_for_each_dev(&css_bus_type, NULL, &chpid, + dev = get_device(&css[0]->chps[chpid]->dev); + bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), s390_subchannel_remove_chpid); if (need_rescan || css_slow_subchannels_exist()) queue_work(slow_path_wq, &slow_path_work); + put_device(dev); } +struct res_acc_data { + struct channel_path *chp; + u32 fla_mask; + u16 fla; +}; + static int -s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, - struct subchannel *sch) +s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) { int found; int chp; @@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, * check if chpid is in information updated by ssd */ if (sch->ssd_info.valid && - sch->ssd_info.chpid[chp] == chpid && - (sch->ssd_info.fla[chp] & fla_mask) == fla) { + sch->ssd_info.chpid[chp] == res_data->chp->id && + (sch->ssd_info.fla[chp] & res_data->fla_mask) + == res_data->fla) { found = 1; break; } @@ -337,24 
+348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, * new path information and eventually check for logically * offline chpids. */ - ccode = stsch(sch->irq, &sch->schib); + ccode = stsch(sch->schid, &sch->schib); if (ccode > 0) return 0; return 0x80 >> chp; } +static inline int +s390_process_res_acc_new_sch(struct subchannel_id schid) +{ + struct schib schib; + int ret; + /* + * We don't know the device yet, but since a path + * may be available now to the device we'll have + * to do recognition again. + * Since we don't have any idea about which chpid + * that beast may be on we'll have to do a stsch + * on all devices, grr... + */ + if (stsch_err(schid, &schib)) + /* We're through */ + return need_rescan ? -EAGAIN : -ENXIO; + + /* Put it on the slow path. */ + ret = css_enqueue_subchannel_slow(schid); + if (ret) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + static int -s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) +__s390_process_res_acc(struct subchannel_id schid, void *data) { + int chp_mask, old_lpm; + struct res_acc_data *res_data; struct subchannel *sch; - int irq, rc; + + res_data = (struct res_acc_data *)data; + sch = get_subchannel_by_schid(schid); + if (!sch) + /* Check if a subchannel is newly available. */ + return s390_process_res_acc_new_sch(schid); + + spin_lock_irq(&sch->lock); + + chp_mask = s390_process_res_acc_sch(res_data, sch); + + if (chp_mask == 0) { + spin_unlock_irq(&sch->lock); + return 0; + } + old_lpm = sch->lpm; + sch->lpm = ((sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom) + | chp_mask) & sch->opm; + if (!old_lpm && sch->lpm) + device_trigger_reprobe(sch); + else if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + + spin_unlock_irq(&sch->lock); + put_device(&sch->dev); + return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; +} + + +static int +s390_process_res_acc (struct res_acc_data *res_data) +{ + int rc; char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x", chpid); + sprintf(dbf_txt, "accpr%x", res_data->chp->id); CIO_TRACE_EVENT( 2, dbf_txt); - if (fla != 0) { - sprintf(dbf_txt, "fla%x", fla); + if (res_data->fla != 0) { + sprintf(dbf_txt, "fla%x", res_data->fla); CIO_TRACE_EVENT( 2, dbf_txt); } @@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) * The more information we have (info), the less scanning * will we have to do. */ - - if (!get_chp_status(chpid)) - return 0; /* no need to do the rest */ - - rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int chp_mask, old_lpm; - - sch = get_subchannel_by_schid(irq); - if (!sch) { - struct schib schib; - int ret; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch(irq, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. 
*/ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock_irq(&sch->lock); - - chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch); - - if (chp_mask == 0) { - - spin_unlock_irq(&sch->lock); - continue; - } - old_lpm = sch->lpm; - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | chp_mask) & sch->opm; - if (!old_lpm && sch->lpm) - device_trigger_reprobe(sch); - else if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - - spin_unlock_irq(&sch->lock); - put_device(&sch->dev); - if (fla_mask == 0xffff) - break; - } + rc = for_each_subchannel(__s390_process_res_acc, res_data); + if (css_slow_subchannels_exist()) + rc = -EAGAIN; + else if (rc != -EAGAIN) + rc = 0; return rc; } @@ -466,6 +481,7 @@ int chsc_process_crw(void) { int chpid, ret; + struct res_acc_data res_data; struct { struct chsc_header request; u32 reserved1; @@ -499,8 +515,9 @@ chsc_process_crw(void) ret = 0; do { int ccode, status; + struct device *dev; memset(sei_area, 0, sizeof(*sei_area)); - + memset(&res_data, 0, sizeof(struct res_acc_data)); sei_area->request = (struct chsc_header) { .length = 0x0010, .code = 0x000e, @@ -573,26 +590,25 @@ chsc_process_crw(void) if (status < 0) new_channel_path(sei_area->rsid); else if (!status) - return 0; - if ((sei_area->vf & 0x80) == 0) { - pr_debug("chpid: %x\n", sei_area->rsid); - ret = s390_process_res_acc(sei_area->rsid, - 0, 0); - } else if ((sei_area->vf & 0xc0) == 0x80) { - pr_debug("chpid: %x link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xff00); - } else if ((sei_area->vf & 0xc0) == 0xc0) { - pr_debug("chpid: %x full link addr: %x\n", - sei_area->rsid, sei_area->fla); - ret = s390_process_res_acc(sei_area->rsid, - sei_area->fla, - 0xffff); + break; + dev = get_device(&css[0]->chps[sei_area->rsid]->dev); + res_data.chp = to_channelpath(dev); + pr_debug("chpid: %x", sei_area->rsid); + if ((sei_area->vf & 0xc0) != 0) { + res_data.fla = sei_area->fla; + if ((sei_area->vf & 0xc0) == 0xc0) { + pr_debug(" full link addr: %x", + sei_area->fla); + res_data.fla_mask = 0xffff; + } else { + pr_debug(" link addr: %x", + sei_area->fla); + res_data.fla_mask = 0xff00; + } } - pr_debug("\n"); - + ret = s390_process_res_acc(&res_data); + pr_debug("\n\n"); + put_device(dev); break; default: /* other stuff */ @@ -604,12 +620,72 @@ chsc_process_crw(void) return ret; } +static inline int +__chp_add_new_sch(struct subchannel_id schid) +{ + struct schib schib; + int ret; + + if (stsch(schid, &schib)) + /* We're through */ + return need_rescan ? -EAGAIN : -ENXIO; + + /* Put it on the slow path. */ + ret = css_enqueue_subchannel_slow(schid); + if (ret) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + + static int -chp_add(int chpid) +__chp_add(struct subchannel_id schid, void *data) { + int i; + struct channel_path *chp; struct subchannel *sch; - int irq, ret, rc; + + chp = (struct channel_path *)data; + sch = get_subchannel_by_schid(schid); + if (!sch) + /* Check if the subchannel is now available. */ + return __chp_add_new_sch(schid); + spin_lock(&sch->lock); + for (i=0; i<8; i++) + if (sch->schib.pmcw.chpid[i] == chp->id) { + if (stsch(sch->schid, &sch->schib) != 0) { + /* Endgame. 
*/ + spin_unlock(&sch->lock); + return -ENXIO; + } + break; + } + if (i==8) { + spin_unlock(&sch->lock); + return 0; + } + sch->lpm = ((sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom) + | 0x80 >> i) & sch->opm; + + if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + + spin_unlock(&sch->lock); + put_device(&sch->dev); + return 0; +} + +static int +chp_add(int chpid) +{ + int rc; char dbf_txt[15]; + struct device *dev; if (!get_chp_status(chpid)) return 0; /* no need to do the rest */ @@ -617,59 +693,13 @@ chp_add(int chpid) sprintf(dbf_txt, "cadd%x", chpid); CIO_TRACE_EVENT(2, dbf_txt); - rc = 0; - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - int i; - - sch = get_subchannel_by_schid(irq); - if (!sch) { - struct schib schib; - - if (stsch(irq, &schib)) { - /* We're through */ - if (need_rescan) - rc = -EAGAIN; - break; - } - if (need_rescan) { - rc = -EAGAIN; - continue; - } - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - rc = -EAGAIN; - continue; - } - - spin_lock(&sch->lock); - for (i=0; i<8; i++) - if (sch->schib.pmcw.chpid[i] == chpid) { - if (stsch(sch->irq, &sch->schib) != 0) { - /* Endgame. */ - spin_unlock(&sch->lock); - return rc; - } - break; - } - if (i==8) { - spin_unlock(&sch->lock); - return rc; - } - sch->lpm = ((sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom) - | 0x80 >> i) & sch->opm; - - if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - - spin_unlock(&sch->lock); - put_device(&sch->dev); - } + dev = get_device(&css[0]->chps[chpid]->dev); + rc = for_each_subchannel(__chp_add, to_channelpath(dev)); + if (css_slow_subchannels_exist()) + rc = -EAGAIN; + if (rc != -EAGAIN) + rc = 0; + put_device(dev); return rc; } @@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index) if (!device_is_online(sch)) /* cio could be doing I/O. */ return 0; - cc = stsch(sch->irq, &sch->schib); + cc = stsch(sch->schid, &sch->schib); if (cc) return 0; if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { @@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) * just varied off path. Then kill it. */ if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { - if (css_enqueue_subchannel_slow(sch->irq)) { + if (css_enqueue_subchannel_slow(sch->schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data) return 0; } +static int +__s390_vary_chpid_on(struct subchannel_id schid, void *data) +{ + struct schib schib; + struct subchannel *sch; + + sch = get_subchannel_by_schid(schid); + if (sch) { + put_device(&sch->dev); + return 0; + } + if (stsch_err(schid, &schib)) + /* We're through */ + return -ENXIO; + /* Put it on the slow path. */ + if (css_enqueue_subchannel_slow(schid)) { + css_clear_subchannel_slow_list(); + need_rescan = 1; + return -EAGAIN; + } + return 0; +} + /* * Function: s390_vary_chpid * Varies the specified chpid online or offline @@ -789,8 +842,7 @@ static int s390_vary_chpid( __u8 chpid, int on) { char dbf_text[15]; - int status, irq, ret; - struct subchannel *sch; + int status; sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); CIO_TRACE_EVENT( 2, dbf_text); @@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on) bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? 
s390_subchannel_vary_chpid_on : s390_subchannel_vary_chpid_off); - if (!on) - goto out; - /* Scan for new devices on varied on path. */ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - struct schib schib; - - if (need_rescan) - break; - sch = get_subchannel_by_schid(irq); - if (sch) { - put_device(&sch->dev); - continue; - } - if (stsch(irq, &schib)) - /* We're through */ - break; - /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(irq); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } -out: + if (on) + /* Scan for new devices on varied on path. */ + for_each_subchannel(__s390_vary_chpid_on, NULL); if (need_rescan || css_slow_subchannels_exist()) queue_work(slow_path_wq, &slow_path_work); return 0; @@ -995,7 +1026,7 @@ new_channel_path(int chpid) chp->id = chpid; chp->state = 1; chp->dev = (struct device) { - .parent = &css_bus_device, + .parent = &css[0]->device, .release = chp_release, }; snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); @@ -1017,7 +1048,7 @@ new_channel_path(int chpid) device_unregister(&chp->dev); goto out_free; } else - chps[chpid] = chp; + css[0]->chps[chpid] = chp; return ret; out_free: kfree(chp); @@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no) struct channel_path *chp; struct channel_path_desc *desc; - chp = chps[sch->schib.pmcw.chpid[chp_no]]; + chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; if (!chp) return NULL; desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); @@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void) return (sei_page ? 0 : -ENOMEM); } +int __init +chsc_enable_facility(int operation_code) +{ + int ret; + struct { + struct chsc_header request; + u8 reserved1:4; + u8 format:4; + u8 reserved2; + u16 operation_code; + u32 reserved3; + u32 reserved4; + u32 operation_data_area[252]; + struct chsc_header response; + u32 reserved5:4; + u32 format2:4; + u32 reserved6:24; + } *sda_area; + + sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); + if (!sda_area) + return -ENOMEM; + sda_area->request = (struct chsc_header) { + .length = 0x0400, + .code = 0x0031, + }; + sda_area->operation_code = operation_code; + + ret = chsc(sda_area); + if (ret > 0) { + ret = (ret == 3) ? 
-ENODEV : -EBUSY; + goto out; + } + switch (sda_area->response.code) { + case 0x0003: /* invalid request block */ + case 0x0007: + ret = -EINVAL; + break; + case 0x0004: /* command not provided */ + case 0x0101: /* facility not provided */ + ret = -EOPNOTSUPP; + break; + } + out: + free_page((unsigned long)sda_area); + return ret; +} + subsys_initcall(chsc_alloc_sei_area); struct css_general_char css_general_characteristics; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index be20da4..44e4b4b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -1,12 +1,12 @@ #ifndef S390_CHSC_H #define S390_CHSC_H -#define NR_CHPIDS 256 - #define CHSC_SEI_ACC_CHPID 1 #define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_FULLLINKADDR 3 +#define CHSC_SDA_OC_MSS 0x2 + struct chsc_header { u16 length; u16 code; @@ -43,7 +43,9 @@ struct css_general_char { u32 ext_mb : 1; /* bit 48 */ u32 : 7; u32 aif_tdd : 1; /* bit 56 */ - u32 : 10; + u32 : 1; + u32 qebsm : 1; /* bit 58 */ + u32 : 8; u32 aif_osa : 1; /* bit 67 */ u32 : 28; }__attribute__((packed)); @@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void); extern int css_characteristics_avail; extern void *chsc_get_chp_desc(struct subchannel*, int); + +extern int chsc_enable_facility(int); + +#define to_channelpath(dev) container_of(dev, struct channel_path, dev) + #endif diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 185bc73..7376bc8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls - * $Revision: 1.135 $ + * $Revision: 1.138 $ * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -135,7 +135,7 @@ cio_tpi(void) return 0; irb = (struct irb *) __LC_IRB; /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) != 0) + if (tsch (tpi_info->schid, irb) != 0) /* Not status pending or not operational. */ return 1; sch = (struct subchannel *)(unsigned long)tpi_info->intparm; @@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) else sch->lpm = 0; - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " - "subchannel %04x!\n", sch->irq); + "subchannel 0.%x.%04x!\n", sch->schid.ssid, + sch->schid.sch_no); sprintf(dbf_text, "no%s", sch->dev.bus_id); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.spnd = sch->options.suspend; sch->orb.ssic = sch->options.suspend && sch->options.inter; sch->orb.lpm = (lpm != 0) ? 
(lpm & sch->opm) : sch->lpm; -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ @@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.key = key >> 4; /* issue "Start Subchannel" */ sch->orb.cpa = (__u32) __pa (cpa); - ccode = ssch (sch->irq, &sch->orb); + ccode = ssch (sch->schid, &sch->orb); /* process condition code */ sprintf (dbf_txt, "ccode:%d", ccode); @@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch) CIO_TRACE_EVENT (4, "resIO"); CIO_TRACE_EVENT (4, sch->dev.bus_id); - ccode = rsch (sch->irq); + ccode = rsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (4, dbf_txt); @@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch) /* * Issue "Halt subchannel" and process condition code */ - ccode = hsch (sch->irq); + ccode = hsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch) /* * Issue "Clear subchannel" and process condition code */ - ccode = csch (sch->irq); + ccode = csch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch) CIO_TRACE_EVENT (2, "cancelIO"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = xsch (sch->irq); + ccode = xsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch) switch (ccode) { case 0: /* success */ /* Update information in scsw. */ - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); return 0; case 1: /* status pending */ return -EBUSY; @@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch) ret = 0; for (retry = 0; retry < 5; retry++) { - ccode = msch_err (sch->irq, &sch->schib); + ccode = msch_err (sch->schid, &sch->schib); if (ccode < 0) /* -EIO if msch gets a program check. */ return ccode; switch (ccode) { @@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) CIO_TRACE_EVENT (2, "ensch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + ccode = stsch (sch->schid, &sch->schib); if (ccode) return -ENODEV; @@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) */ sch->schib.pmcw.csense = 0; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (sch->schib.pmcw.ena) break; } if (ret == -EBUSY) { struct irb irb; - if (tsch(sch->irq, &irb) != 0) + if (tsch(sch->schid, &irb) != 0) break; } } @@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch) CIO_TRACE_EVENT (2, "dissch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + ccode = stsch (sch->schid, &sch->schib); if (ccode == 3) /* Not operational. */ return -ENODEV; @@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch) */ break; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (!sch->schib.pmcw.ena) break; } @@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch) * -ENODEV for subchannels with invalid device number or blacklisted devices */ int -cio_validate_subchannel (struct subchannel *sch, unsigned int irq) +cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; - sprintf (dbf_txt, "valsch%x", irq); + sprintf (dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT (4, dbf_txt); /* Nuke all fields. 
*/ @@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) spin_lock_init(&sch->lock); /* Set a name for the subchannel */ - snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); + snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, + schid.sch_no); /* * The first subchannel that is not-operational (ccode==3) * indicates that there aren't any more devices available. + * If stsch gets an exception, it means the current subchannel set + * is not valid. */ - sch->irq = irq; - ccode = stsch (irq, &sch->schib); + ccode = stsch_err (schid, &sch->schib); if (ccode) - return -ENXIO; + return (ccode == 3) ? -ENXIO : ccode; + sch->schid = schid; /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; @@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) */ if (sch->st != 0) { CIO_DEBUG(KERN_INFO, 0, - "Subchannel %04X reports " + "Subchannel 0.%x.%04x reports " "non-I/O subchannel type %04X\n", - sch->irq, sch->st); + sch->schid.ssid, sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ return sch->st; } @@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) return -ENODEV; /* Devno is valid. */ - if (is_blacklisted (sch->schib.pmcw.dev)) { + if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { /* * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. */ CIO_MSG_EVENT(0, "Blacklisted device detected " - "at devno %04X\n", sch->schib.pmcw.dev); + "at devno %04X, subchannel set %x\n", + sch->schib.pmcw.dev, sch->schid.ssid); return -ENODEV; } sch->opm = 0xff; - chsc_validate_chpids(sch); + if (!cio_is_console(sch->schid)) + chsc_validate_chpids(sch); sch->lpm = sch->schib.pmcw.pim & sch->schib.pmcw.pam & sch->schib.pmcw.pom & sch->opm; CIO_DEBUG(KERN_INFO, 0, - "Detected device %04X on subchannel %04X" + "Detected device %04x on subchannel 0.%x.%04X" " - PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); /* @@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs) if (sch) spin_lock(&sch->lock); /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) == 0 && sch) { + if (tsch (tpi_info->schid, irb) == 0 && sch) { /* Keep subchannel information word up to date. */ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); @@ -691,28 +698,36 @@ wait_cons_dev (void) } static int -cio_console_irq(void) +cio_test_for_console(struct subchannel_id schid, void *data) { - int irq; + if (stsch_err(schid, &console_subchannel.schib) != 0) + return -ENXIO; + if (console_subchannel.schib.pmcw.dnv && + console_subchannel.schib.pmcw.dev == + console_devno) { + console_irq = schid.sch_no; + return 1; /* found */ + } + return 0; +} + + +static int +cio_get_console_sch_no(void) +{ + struct subchannel_id schid; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ - if (stsch(console_irq, &console_subchannel.schib) != 0 || + schid.sch_no = console_irq; + if (stsch(schid, &console_subchannel.schib) != 0 || !console_subchannel.schib.pmcw.dnv) return -1; console_devno = console_subchannel.schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. 
*/
-		for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-			if (stsch(irq, &console_subchannel.schib) != 0)
-				break;
-			if (console_subchannel.schib.pmcw.dnv &&
-			    console_subchannel.schib.pmcw.dev ==
-			    console_devno) {
-				console_irq = irq;
-				break;
-			}
-		}
+		for_each_subchannel(cio_test_for_console, NULL);
 		if (console_irq == -1)
 			return -1;
 	} else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
 struct subchannel *
 cio_probe_console(void)
 {
-	int irq, ret;
+	int sch_no, ret;
+	struct subchannel_id schid;
 
 	if (xchg(&console_subchannel_in_use, 1) != 0)
 		return ERR_PTR(-EBUSY);
-	irq = cio_console_irq();
-	if (irq == -1) {
+	sch_no = cio_get_console_sch_no();
+	if (sch_no == -1) {
 		console_subchannel_in_use = 0;
 		return ERR_PTR(-ENODEV);
 	}
 	memset(&console_subchannel, 0, sizeof(struct subchannel));
-	ret = cio_validate_subchannel(&console_subchannel, irq);
+	init_subchannel_id(&schid);
+	schid.sch_no = sch_no;
+	ret = cio_validate_subchannel(&console_subchannel, schid);
 	if (ret) {
 		console_subchannel_in_use = 0;
 		return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
 
 /* Bah... hack to catch console special sausages. */
 int
-cio_is_console(int irq)
+cio_is_console(struct subchannel_id schid)
 {
 	if (!console_subchannel_in_use)
 		return 0;
-	return (irq == console_subchannel.irq);
+	return schid_equal(&schid, &console_subchannel.schid);
 }
 
 struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
 #endif
 
 static inline int
-__disable_subchannel_easy(unsigned int schid, struct schib *schib)
+__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 {
 	int retry, cc;
 
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
 }
 
 static inline int
-__clear_subchannel_easy(unsigned int schid)
+__clear_subchannel_easy(struct subchannel_id schid)
 {
 	int retry;
 
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
 			struct tpi_info ti;
 
 			if (tpi(&ti)) {
-				tsch(ti.irq, (struct irb *)__LC_IRB);
-				if (ti.irq == schid)
+				tsch(ti.schid, (struct irb *)__LC_IRB);
+				if (schid_equal(&ti.schid, &schid))
 					return 0;
 			}
 			udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
 }
 
 extern void do_reipl(unsigned long devno);
+static int
+__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+
+	if (stsch_err(schid, &schib))
+		return -ENXIO;
+	if (!schib.pmcw.ena)
+		return 0;
+	switch(__disable_subchannel_easy(schid, &schib)) {
+	case 0:
+	case -ENODEV:
+		break;
+	default: /* -EBUSY */
+		if (__clear_subchannel_easy(schid))
+			break; /* give up... */
+		stsch(schid, &schib);
+		__disable_subchannel_easy(schid, &schib);
+	}
+	return 0;
+}
 
-/* Clear all subchannels. */
 void
 clear_all_subchannels(void)
 {
-	unsigned int schid;
-
 	local_irq_disable();
-	for (schid=0;schid<=highest_subchannel;schid++) {
-		struct schib schib;
-		if (stsch(schid, &schib))
-			break; /* break out of the loop */
-		if (!schib.pmcw.ena)
-			continue;
-		switch(__disable_subchannel_easy(schid, &schib)) {
-		case 0:
-		case -ENODEV:
-			break;
-		default: /* -EBUSY */
-			if (__clear_subchannel_easy(schid))
-				break; /* give up... jump out of switch */
-			stsch(schid, &schib);
-			__disable_subchannel_easy(schid, &schib);
-		}
-	}
+	for_each_subchannel(__shutdown_subchannel_easy, NULL);
 }
 
 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
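The cio.h hunk below pulls in a new header, "schid.h", whose contents are not part of this excerpt. As a rough orientation, here is a sketch of what it presumably provides, inferred from how the rest of the patch uses it; the exact field layout is an assumption (ssid is masked with 3 elsewhere in the patch, sch_no is printed with %04x, and the "one" bit stands in for the old (irq | 0x10000) encoding handed to the I/O instructions):

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical reconstruction -- NOT the actual schid.h. */
struct subchannel_id {
	__u32 reserved : 13;
	__u32 ssid : 2;		/* subchannel set id, 0..__MAX_SSID */
	__u32 one : 1;		/* must be set for stsch() and friends */
	__u32 sch_no : 16;	/* subchannel number within the set */
} __attribute__ ((packed));

/* Zero the id and set the mandatory bit, as cio_probe_console() above
 * does before filling in sch_no. */
static inline void
init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(struct subchannel_id));
	schid->one = 1;
}

/* Bitwise comparison, used by cio_is_console() and check_subchannel(). */
static inline int
schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
{
	return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
}

With an id type instead of a bare number, set-qualified comparison and printing ("0.%x.%04x") fall out naturally.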
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da..0ca9873 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
 #ifndef S390_CIO_H
 #define S390_CIO_H
 
+#include "schid.h"
+
 /*
  * where we put the ssd info
  */
@@ -83,7 +85,7 @@ struct orb {
 
 /* subchannel data structure used by I/O subroutines */
 struct subchannel {
-	unsigned int irq;	/* aka. subchannel number */
+	struct subchannel_id schid;
 	spinlock_t lock;	/* subchannel lock */
 
 	enum {
@@ -114,7 +116,7 @@ struct subchannel {
 
 #define to_subchannel(n) container_of(n, struct subchannel, dev)
 
-extern int cio_validate_subchannel (struct subchannel *, unsigned int);
+extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
 extern int cio_enable_subchannel (struct subchannel *, unsigned int);
 extern int cio_disable_subchannel (struct subchannel *);
 extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
 extern int cio_set_options (struct subchannel *, int);
 extern int cio_get_options (struct subchannel *);
 extern int cio_modify (struct subchannel *);
+
 /* Use with care. */
 #ifdef CONFIG_CCW_CONSOLE
 extern struct subchannel *cio_probe_console(void);
 extern void cio_release_console(void);
-extern int cio_is_console(int irq);
+extern int cio_is_console(struct subchannel_id);
 extern struct subchannel *cio_get_console_subchannel(void);
 #else
-#define cio_is_console(irq) 0
+#define cio_is_console(schid) 0
 #define cio_get_console_subchannel() NULL
 #endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7f..0b03714 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
+ * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
  *
  * Linux on zSeries Channel Measurement Facility support
  *
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
 	/* msch can silently fail, so do it again if necessary */
 	for (retry = 0; retry < 3; retry++) {
 		/* prepare schib */
-		stsch(sch->irq, schib);
+		stsch(sch->schid, schib);
 		schib->pmcw.mme = mme;
 		schib->pmcw.mbfc = mbfc;
 		/* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
 			schib->pmcw.mbi = address;
 
 		/* try to submit it */
-		switch(ret = msch_err(sch->irq, schib)) {
+		switch(ret = msch_err(sch->schid, schib)) {
 		case 0:
 			break;
 		case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
 			ret = -EINVAL;
 			break;
 		}
-		stsch(sch->irq, schib); /* restore the schib */
+		stsch(sch->schid, schib); /* restore the schib */
 		if (ret)
 			break;
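The css.c hunks that follow introduce for_each_subchannel(), which replaces the many open-coded "for (irq = 0; irq < __MAX_SUBCHANNELS; irq++)" scans with a single callback-driven walk over every subchannel set. A minimal, self-contained sketch of the control flow (plain C with simplified types, not the kernel code; note that in the kernel version below a non-zero callback return only ends the scan of the current subchannel set):

#include <stdio.h>

/* Simplified stand-ins for the kernel types; illustration only. */
struct subchannel_id {
	unsigned int ssid;
	unsigned int sch_no;
};

#define __MAX_SUBCHANNEL 65535
static int max_ssid;	/* stays 0 unless multiple subchannel sets exist */

/* Visit every (ssid, sch_no) pair, lowest set first, handing each id
 * to the callback together with an opaque data pointer. */
static int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid = { 0, 0 };
	int ret = -1;	/* -ENODEV in the kernel version */

	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;	/* leave the current set */
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

/* Example callback in the style of __shutdown_subchannel_easy() above;
 * returning non-zero would end the scan of this set early. */
static int
count_cb(struct subchannel_id schid, void *data)
{
	++*(unsigned long *)data;
	return 0;
}

int main(void)
{
	unsigned long visited = 0;

	for_each_subchannel(count_cb, &visited);
	printf("visited %lu subchannel ids\n", visited);
	return 0;
}

Centralizing the walk keeps the knowledge of max_ssid (whether multiple subchannel sets are usable) in one place instead of in every caller.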
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119c..e565193 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/css.c
  * driver for channel subsystem
- * $Revision: 1.85 $
+ * $Revision: 1.93 $
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  * IBM Corporation
@@ -21,19 +21,35 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-unsigned int highest_subchannel;
 int need_rescan = 0;
 int css_init_done = 0;
+static int max_ssid = 0;
+
+struct channel_subsystem *css[__MAX_CSSID + 1];
 
-struct pgid global_pgid;
 int css_characteristics_avail = 0;
 
-struct device css_bus_device = {
-	.bus_id = "css0",
-};
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	ret = -ENODEV;
+	do {
+		do {
+			ret = fn(schid, data);
+			if (ret)
+				break;
+		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		schid.sch_no = 0;
+	} while (schid.ssid++ < max_ssid);
+	return ret;
+}
 
 static struct subchannel *
-css_alloc_subchannel(int irq)
+css_alloc_subchannel(struct subchannel_id schid)
 {
 	struct subchannel *sch;
 	int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
 	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
 	if (sch == NULL)
 		return ERR_PTR(-ENOMEM);
-	ret = cio_validate_subchannel (sch, irq);
+	ret = cio_validate_subchannel (sch, schid);
 	if (ret < 0) {
 		kfree(sch);
 		return ERR_PTR(ret);
 	}
-	if (irq > highest_subchannel)
-		highest_subchannel = irq;
 
 	if (sch->st != SUBCHANNEL_TYPE_IO) {
 		/* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->irq))
+	if (!cio_is_console(sch->schid))
 		kfree(sch);
 }
 
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
 	int ret;
 
 	/* Initialize the subchannel structure */
-	sch->dev.parent = &css_bus_device;
+	sch->dev.parent = &css[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
 
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
 }
 
 int
-css_probe_device(int irq)
+css_probe_device(struct subchannel_id schid)
 {
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(irq);
+	sch = css_alloc_subchannel(schid);
 	if (IS_ERR(sch))
 		return PTR_ERR(sch);
 	ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
 check_subchannel(struct device * dev, void * data)
 {
 	struct subchannel *sch;
-	int irq = (unsigned long)data;
+	struct subchannel_id *schid = data;
 
 	sch = to_subchannel(dev);
-	return (sch->irq == irq);
+	return schid_equal(&sch->schid, schid);
 }
 
 struct subchannel *
-get_subchannel_by_schid(int irq)
+get_subchannel_by_schid(struct subchannel_id schid)
 {
 	struct device *dev;
 
 	dev = bus_find_device(&css_bus_type, NULL,
-			      (void *)(unsigned long)irq, check_subchannel);
+			      (void *)&schid, check_subchannel);
 
 	return dev ? to_subchannel(dev) : NULL;
 }
 
 static inline int
-css_get_subchannel_status(struct subchannel *sch, int schid)
+css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
 {
 	struct schib schib;
 	int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
 }
 
 static int
-css_evaluate_subchannel(int irq, int slow)
+css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
 	int event, ret, disc;
 	struct subchannel *sch;
 	unsigned long flags;
 
-	sch = get_subchannel_by_schid(irq);
+	sch = get_subchannel_by_schid(schid);
 	disc = sch ? device_is_disconnected(sch) : 0;
 	if (disc && slow) {
 		if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
 			put_device(&sch->dev);
 		return -EAGAIN; /* Will be done on the slow path. 
*/ } - event = css_get_subchannel_status(sch, irq); - CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", - irq, event, sch?(disc?"disconnected":"normal"):"unknown", + event = css_get_subchannel_status(sch, schid); + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", + schid.ssid, schid.sch_no, event, + sch?(disc?"disconnected":"normal"):"unknown", slow?"slow":"fast"); switch (event) { case CIO_NO_PATH: @@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow) sch->schib.pmcw.intparm = 0; cio_modify(sch); put_device(&sch->dev); - ret = css_probe_device(irq); + ret = css_probe_device(schid); } else { /* * We can't immediately deregister the disconnected @@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow) device_trigger_reprobe(sch); spin_unlock_irqrestore(&sch->lock, flags); } - ret = sch ? 0 : css_probe_device(irq); + ret = sch ? 0 : css_probe_device(schid); break; default: BUG(); @@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow) return ret; } -static void -css_rescan_devices(void) +static int +css_rescan_devices(struct subchannel_id schid, void *data) { - int irq, ret; - - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - ret = css_evaluate_subchannel(irq, 1); - /* No more memory. It doesn't make sense to continue. No - * panic because this can happen in midflight and just - * because we can't use a new device is no reason to crash - * the system. */ - if (ret == -ENOMEM) - break; - /* -ENXIO indicates that there are no more subchannels. */ - if (ret == -ENXIO) - break; - } + return css_evaluate_subchannel(schid, 1); } struct slow_subchannel { struct list_head slow_list; - unsigned long schid; + struct subchannel_id schid; }; static LIST_HEAD(slow_subchannels_head); @@ -315,7 +317,7 @@ css_trigger_slow_path(void) if (need_rescan) { need_rescan = 0; - css_rescan_devices(); + for_each_subchannel(css_rescan_devices, NULL); return; } @@ -354,23 +356,31 @@ css_reiterate_subchannels(void) * Called from the machine check handler for subchannel report words. */ int -css_process_crw(int irq) +css_process_crw(int rsid1, int rsid2) { int ret; + struct subchannel_id mchk_schid; - CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); + CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", + rsid1, rsid2); if (need_rescan) /* We need to iterate all subchannels anyway. */ return -EAGAIN; + + init_subchannel_id(&mchk_schid); + mchk_schid.sch_no = rsid1; + if (rsid2 != 0) + mchk_schid.ssid = (rsid2 >> 8) & 3; + /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. */ - ret = css_evaluate_subchannel(irq, 0); + ret = css_evaluate_subchannel(mchk_schid, 0); if (ret == -EAGAIN) { - if (css_enqueue_subchannel_slow(irq)) { + if (css_enqueue_subchannel_slow(mchk_schid)) { css_clear_subchannel_slow_list(); need_rescan = 1; } @@ -378,22 +388,83 @@ css_process_crw(int irq) return ret; } -static void __init -css_generate_pgid(void) +static int __init +__init_channel_subsystem(struct subchannel_id schid, void *data) { - /* Let's build our path group ID here. 
*/ - if (css_characteristics_avail && css_general_characteristics.mcss) - global_pgid.cpu_addr = 0x8000; + struct subchannel *sch; + int ret; + + if (cio_is_console(schid)) + sch = cio_get_console_subchannel(); else { + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + ret = PTR_ERR(sch); + else + ret = 0; + switch (ret) { + case 0: + break; + case -ENOMEM: + panic("Out of memory in init_channel_subsystem\n"); + /* -ENXIO: no more subchannels. */ + case -ENXIO: + return ret; + default: + return 0; + } + } + /* + * We register ALL valid subchannels in ioinfo, even those + * that have been present before init_channel_subsystem. + * These subchannels can't have been registered yet (kmalloc + * not working) so we do it now. This is true e.g. for the + * console subchannel. + */ + css_register_subchannel(sch); + return 0; +} + +static void __init +css_generate_pgid(struct channel_subsystem *css, u32 tod_high) +{ + if (css_characteristics_avail && css_general_characteristics.mcss) { + css->global_pgid.pgid_high.ext_cssid.version = 0x80; + css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; + } else { #ifdef CONFIG_SMP - global_pgid.cpu_addr = hard_smp_processor_id(); + css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id(); #else - global_pgid.cpu_addr = 0; + css->global_pgid.pgid_high.cpu_addr = 0; #endif } - global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; - global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; - global_pgid.tod_high = (__u32) (get_clock() >> 32); + css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; + css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; + css->global_pgid.tod_high = tod_high; + +} + +static void +channel_subsystem_release(struct device *dev) +{ + struct channel_subsystem *css; + + css = to_css(dev); + kfree(css); +} + +static inline void __init +setup_css(int nr) +{ + u32 tod_high; + + memset(css[nr], 0, sizeof(struct channel_subsystem)); + css[nr]->valid = 1; + css[nr]->cssid = nr; + sprintf(css[nr]->device.bus_id, "css%x", nr); + css[nr]->device.release = channel_subsystem_release; + tod_high = (u32) (get_clock() >> 32); + css_generate_pgid(css[nr], tod_high); } /* @@ -404,53 +475,50 @@ css_generate_pgid(void) static int __init init_channel_subsystem (void) { - int ret, irq; + int ret, i; if (chsc_determine_css_characteristics() == 0) css_characteristics_avail = 1; - css_generate_pgid(); - if ((ret = bus_register(&css_bus_type))) goto out; - if ((ret = device_register (&css_bus_device))) - goto out_bus; + /* Try to enable MSS. */ + ret = chsc_enable_facility(CHSC_SDA_OC_MSS); + switch (ret) { + case 0: /* Success. */ + max_ssid = __MAX_SSID; + break; + case -ENOMEM: + goto out_bus; + default: + max_ssid = 0; + } + /* Setup css structure. */ + for (i = 0; i <= __MAX_CSSID; i++) { + css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); + if (!css[i]) { + ret = -ENOMEM; + goto out_unregister; + } + setup_css(i); + ret = device_register(&css[i]->device); + if (ret) + goto out_free; + } css_init_done = 1; ctl_set_bit(6, 28); - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - struct subchannel *sch; - - if (cio_is_console(irq)) - sch = cio_get_console_subchannel(); - else { - sch = css_alloc_subchannel(irq); - if (IS_ERR(sch)) - ret = PTR_ERR(sch); - else - ret = 0; - if (ret == -ENOMEM) - panic("Out of memory in " - "init_channel_subsystem\n"); - /* -ENXIO: no more subchannels. 
*/ - if (ret == -ENXIO) - break; - if (ret) - continue; - } - /* - * We register ALL valid subchannels in ioinfo, even those - * that have been present before init_channel_subsystem. - * These subchannels can't have been registered yet (kmalloc - * not working) so we do it now. This is true e.g. for the - * console subchannel. - */ - css_register_subchannel(sch); - } + for_each_subchannel(__init_channel_subsystem, NULL); return 0; - +out_free: + kfree(css[i]); +out_unregister: + while (i > 0) { + i--; + device_unregister(&css[i]->device); + } out_bus: bus_unregister(&css_bus_type); out: @@ -481,47 +549,8 @@ struct bus_type css_bus_type = { subsys_initcall(init_channel_subsystem); -/* - * Register root devices for some drivers. The release function must not be - * in the device drivers, so we do it here. - */ -static void -s390_root_dev_release(struct device *dev) -{ - kfree(dev); -} - -struct device * -s390_root_dev_register(const char *name) -{ - struct device *dev; - int ret; - - if (!strlen(name)) - return ERR_PTR(-EINVAL); - dev = kmalloc(sizeof(struct device), GFP_KERNEL); - if (!dev) - return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(struct device)); - strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); - dev->release = s390_root_dev_release; - ret = device_register(dev); - if (ret) { - kfree(dev); - return ERR_PTR(ret); - } - return dev; -} - -void -s390_root_dev_unregister(struct device *dev) -{ - if (dev) - device_unregister(dev); -} - int -css_enqueue_subchannel_slow(unsigned long schid) +css_enqueue_subchannel_slow(struct subchannel_id schid) { struct slow_subchannel *new_slow_sch; unsigned long flags; @@ -564,6 +593,4 @@ css_slow_subchannels_exist(void) MODULE_LICENSE("GPL"); EXPORT_SYMBOL(css_bus_type); -EXPORT_SYMBOL(s390_root_dev_register); -EXPORT_SYMBOL(s390_root_dev_unregister); EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 2004a6c..251ebd7 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -6,6 +6,8 @@ #include <asm/cio.h> +#include "schid.h" + /* * path grouping stuff */ @@ -33,19 +35,25 @@ struct path_state { __u8 resvd : 3; /* reserved */ } __attribute__ ((packed)); +struct extended_cssid { + u8 version; + u8 cssid; +} __attribute__ ((packed)); + struct pgid { union { __u8 fc; /* SPID function code */ struct path_state ps; /* SNID path state */ } inf; - __u32 cpu_addr : 16; /* CPU address */ + union { + __u32 cpu_addr : 16; /* CPU address */ + struct extended_cssid ext_cssid; + } pgid_high; __u32 cpu_id : 24; /* CPU identification */ __u32 cpu_model : 16; /* CPU model */ __u32 tod_high; /* high word TOD clock */ } __attribute__ ((packed)); -extern struct pgid global_pgid; - #define MAX_CIWS 8 /* @@ -68,7 +76,8 @@ struct ccw_device_private { atomic_t onoff; unsigned long registered; __u16 devno; /* device number */ - __u16 irq; /* subchannel number */ + __u16 sch_no; /* subchannel number */ + __u8 ssid; /* subchannel set id */ __u8 imask; /* lpm mask for SNID/SID/SPGID */ int iretry; /* retry counter SNID/SID/SPGID */ struct { @@ -121,15 +130,27 @@ struct css_driver { extern struct bus_type css_bus_type; extern struct css_driver io_subchannel_driver; -int css_probe_device(int irq); -extern struct subchannel * get_subchannel_by_schid(int irq); -extern unsigned int highest_subchannel; +extern int css_probe_device(struct subchannel_id); +extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; - -#define __MAX_SUBCHANNELS 65536 
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); + +#define __MAX_SUBCHANNEL 65535 +#define __MAX_SSID 3 +#define __MAX_CHPID 255 +#define __MAX_CSSID 0 + +struct channel_subsystem { + u8 cssid; + int valid; + struct channel_path *chps[__MAX_CHPID]; + struct device device; + struct pgid global_pgid; +}; +#define to_css(dev) container_of(dev, struct channel_subsystem, device) extern struct bus_type css_bus_type; -extern struct device css_bus_device; +extern struct channel_subsystem *css[]; /* Some helper functions for disconnected state. */ int device_is_disconnected(struct subchannel *); @@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *); void device_kill_pending_timer(struct subchannel *); /* Helper functions to build lists for the slow path. */ -int css_enqueue_subchannel_slow(unsigned long schid); +extern int css_enqueue_subchannel_slow(struct subchannel_id schid); void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); void css_clear_subchannel_slow_list(void); int css_slow_subchannels_exist(void); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 85908ca..fa3e4c0 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/device.c * bus driver for ccw devices - * $Revision: 1.131 $ + * $Revision: 1.137 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf int i, force, ret; char *tmp; - if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; if (cdev->drv && !try_module_get(cdev->drv->owner)) { @@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev) } struct match_data { - unsigned int devno; + unsigned int devno; + unsigned int ssid; struct ccw_device * sibling; }; @@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data) cdev = to_ccwdev(dev); if ((cdev->private->state == DEV_STATE_DISCONNECTED) && (cdev->private->devno == d->devno) && + (cdev->private->ssid == d->ssid) && (cdev != d->sibling)) { cdev->private->state = DEV_STATE_NOT_OPER; return 1; @@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data) } static struct ccw_device * -get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) +get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid, + struct ccw_device *sibling) { struct device *dev; struct match_data data = { - .devno = devno, + .devno = devno, + .ssid = ssid, .sibling = sibling, }; @@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data) need_rename = 1; other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, - cdev); + sch->schid.ssid, cdev); if (other_cdev) { struct subchannel *other_sch; other_sch = to_subchannel(other_cdev->dev.parent); if (get_device(&other_sch->dev)) { - stsch(other_sch->irq, &other_sch->schib); + stsch(other_sch->schid, &other_sch->schib); if (other_sch->schib.pmcw.dnv) { other_sch->schib.pmcw.intparm = 0; cio_modify(other_sch); @@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data) if (test_and_clear_bit(1, &cdev->private->registered)) device_del(&cdev->dev); if (need_rename) - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); + snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", + sch->schid.ssid, sch->schib.pmcw.dev); PREPARE_WORK(&cdev->private->kick_work, ccw_device_add_changed, (void *)cdev); queue_work(ccw_device_work, 
&cdev->private->kick_work); @@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) sch->dev.driver_data = cdev; sch->driver = &io_subchannel_driver; cdev->ccwlock = &sch->lock; + /* Init private data. */ priv = cdev->private; priv->devno = sch->schib.pmcw.dev; - priv->irq = sch->irq; + priv->ssid = sch->schid.ssid; + priv->sch_no = sch->schid.sch_no; priv->state = DEV_STATE_NOT_OPER; INIT_LIST_HEAD(&priv->cmb_list); init_waitqueue_head(&priv->wait_q); init_timer(&priv->timer); /* Set an initial name for the device. */ - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", - sch->schib.pmcw.dev); + snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", + sch->schid.ssid, sch->schib.pmcw.dev); /* Increase counter of devices currently in recognition. */ atomic_inc(&ccw_device_init_count); @@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev) sch = to_subchannel(dev); cdev = dev->driver_data; - if (cio_is_console(sch->irq)) + if (cio_is_console(sch->schid)) return; if (!sch->schib.pmcw.ena) /* Nothing to do. */ @@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) cdev->dev = (struct device) { .parent = &sch->dev, }; - /* Initialize the subchannel structure */ - sch->dev.parent = &css_bus_device; - sch->dev.bus = &css_bus_type; - rc = io_subchannel_recog(cdev, sch); if (rc) return rc; @@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver) driver_unregister(&cdriver->driver); } +/* Helper func for qdio. */ +struct subchannel_id +ccw_device_get_subchannel_id(struct ccw_device *cdev) +{ + struct subchannel *sch; + + sch = to_subchannel(cdev->dev.parent); + return sch->schid; +} + MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); @@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid); EXPORT_SYMBOL(ccw_bus_type); EXPORT_SYMBOL(ccw_device_work); EXPORT_SYMBOL(ccw_device_notify_work); +EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index a3aa056..11587eb 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *); /* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); +extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); void retry_set_schib(struct ccw_device *cdev); #endif diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c1c89f4..23d12b6 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) int ret; sch = to_subchannel(cdev->dev.parent); - ret = stsch(sch->irq, &sch->schib); + ret = stsch(sch->schid, &sch->schib); if (ret || !sch->schib.pmcw.dnv) return -ENODEV; if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) @@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) * through ssch() and the path information is up to date. 
*/ old_lpm = sch->lpm; - stsch(sch->irq, &sch->schib); + stsch(sch->schid, &sch->schib); sch->lpm = sch->schib.pmcw.pim & sch->schib.pmcw.pam & sch->schib.pmcw.pom & @@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) switch (state) { case DEV_STATE_NOT_OPER: CIO_DEBUG(KERN_WARNING, 2, - "SenseID : unknown device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + "SenseID : unknown device %04x on subchannel " + "0.%x.%04x\n", cdev->private->devno, + sch->schid.ssid, sch->schid.sch_no); break; case DEV_STATE_OFFLINE: if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { @@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) return; } /* Issue device info message. */ - CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: " + CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: " "CU Type/Mod = %04X/%02X, Dev Type/Mod = " - "%04X/%02X\n", cdev->private->devno, + "%04X/%02X\n", + cdev->private->ssid, cdev->private->devno, cdev->id.cu_type, cdev->id.cu_model, cdev->id.dev_type, cdev->id.dev_model); break; case DEV_STATE_BOXED: CIO_DEBUG(KERN_WARNING, 2, - "SenseID : boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + "SenseID : boxed device %04x on subchannel " + "0.%x.%04x\n", cdev->private->devno, + sch->schid.ssid, sch->schid.sch_no); break; } cdev->private->state = state; @@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state) if (state == DEV_STATE_BOXED) CIO_DEBUG(KERN_WARNING, 2, "Boxed device %04x on subchannel %04x\n", - cdev->private->devno, sch->irq); + cdev->private->devno, sch->schid.sch_no); if (cdev->private->flags.donotify) { cdev->private->flags.donotify = 0; @@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv) + if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE) { if (sch->schib.scsw.actl != 0) @@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) * Since we might not just be coming from an interrupt from the * subchannel we have to update the schib. */ - stsch(sch->irq, &sch->schib); + stsch(sch->schid, &sch->schib); if (sch->schib.scsw.actl != 0 || (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { @@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event) /* Iff device is idle, reset timeout. */ sch = to_subchannel(cdev->dev.parent); - if (!stsch(sch->irq, &sch->schib)) + if (!stsch(sch->schid, &sch->schib)) if (sch->schib.scsw.actl == 0) ccw_device_set_timeout(cdev, 0); /* Call the handler. */ @@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch) return; /* Update some values. */ - if (stsch(sch->irq, &sch->schib)) + if (stsch(sch->schid, &sch->schib)) return; /* diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 0e68fb5..04ceba3 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -27,7 +27,7 @@ /* * diag210 is used under VM to get information about a virtual device */ -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT int diag210(struct diag210 * addr) { @@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev) * sense id information. So, for intervention required, * we use the "whack it until it talks" strategy... 
 */
-		CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
-			      "reports cmd reject\n",
-			      cdev->private->devno, sch->irq);
+		CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
+			      "0.%x.%04x reports cmd reject\n",
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no);
 		return -EOPNOTSUPP;
 	}
 	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
+		CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
 			      "lpum %02X, cnt %02d, sns :"
 			      " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->devno,
+			      cdev->private->ssid, cdev->private->devno,
 			      irb->esw.esw0.sublog.lpum,
 			      irb->esw.esw0.erw.scnt,
 			      irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
 	if (irb->scsw.cc == 3) {
 		if ((sch->orb.lpm &
 		     sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
-			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
-				      " subchannel %04x is 'not operational'\n",
-				      sch->orb.lpm, cdev->private->devno,
-				      sch->irq);
+			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
				      "on subchannel 0.%x.%04x is "
+				      "'not operational'\n", sch->orb.lpm,
+				      cdev->private->devno, sch->schid.ssid,
+				      sch->schid.sch_no);
 		return -EACCES;
 	}
 	/* Hmm, whatever happened, try again. */
 	CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
-		      "subchannel %04x returns status %02X%02X\n",
-		      cdev->private->devno, sch->irq,
+		      "subchannel 0.%x.%04x returns status %02X%02X\n",
+		      cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
 		      irb->scsw.dstat, irb->scsw.cstat);
 	return -EAGAIN;
 }
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026..143b6c2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/device_ops.c
  *
- * $Revision: 1.57 $
+ * $Revision: 1.58 $
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  * IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
 int
 _ccw_device_get_subchannel_number(struct ccw_device *cdev)
 {
-	return cdev->private->irq;
+	return cdev->private->sch_no;
 }
 
 int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a..052832d 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
 #include "cio_debug.h"
 #include "css.h"
 #include "device.h"
+#include "ioasm.h"
 
 /*
  * Start Sense Path Group ID helper function. Used in ccw_device_recog
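Many of the device_* hunks around here are the same mechanical message change: a subchannel or device that used to be identified by a bare %04x number is now printed as 0.<ssid>.<number>, matching the bus ids built with "0.%x.%04x" in device.c and cio.c earlier in this patch. A toy illustration of the convention (the helper name is made up for this sketch, it is not from the patch):

#include <stdio.h>

/* Format a set-qualified id the way the patch does: the leading 0 is
 * the channel subsystem id, then the subchannel set id, then the
 * device or subchannel number. */
static void
format_busid(char *buf, size_t len, unsigned int ssid, unsigned int devno)
{
	snprintf(buf, len, "0.%x.%04x", ssid, devno);
}

int main(void)
{
	char busid[16];

	format_busid(busid, sizeof(busid), 1, 0x4711);
	printf("%s\n", busid);	/* prints 0.1.4711 */
	return 0;
}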
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
 			if (ret != -EACCES)
 				return ret;
 			CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
-				      "%04x, lpm %02X, became 'not "
+				      "0.%x.%04x, lpm %02X, became 'not "
 				      "operational'\n",
-				      cdev->private->devno, sch->irq,
-				      cdev->private->imask);
+				      cdev->private->devno, sch->schid.ssid,
+				      sch->schid.sch_no, cdev->private->imask);
 		}
 		cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
 		return -EOPNOTSUPP;
 	}
 	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
+		CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
 			      "lpum %02X, cnt %02d, sns : "
 			      "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->devno,
+			      cdev->private->ssid, cdev->private->devno,
 			      irb->esw.esw0.sublog.lpum,
 			      irb->esw.esw0.erw.scnt,
 			      irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
 		return -EAGAIN;
 	}
 	if (irb->scsw.cc == 3) {
-		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
-			      "%04x, lpm %02X, became 'not operational'\n",
-			      cdev->private->devno, sch->irq, sch->orb.lpm);
+		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+			      " lpm %02X, became 'not operational'\n",
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no, sch->orb.lpm);
 		return -EACCES;
 	}
 	if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
-		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
+		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
 			      "is reserved by someone else\n",
-			      cdev->private->devno, sch->irq);
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no);
 		return -EUSERS;
 	}
 	return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
 	case 0:			/* Sense Path Group ID successful. */
 		if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
-			memcpy(&cdev->private->pgid, &global_pgid,
+			memcpy(&cdev->private->pgid, &css[0]->global_pgid,
 			       sizeof(struct pgid));
 		ccw_device_sense_pgid_done(cdev, 0);
 		break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
 	sch->lpm &= ~cdev->private->imask;
 	sch->vpm &= ~cdev->private->imask;
 	CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
-		      "%04x, lpm %02X, became 'not operational'\n",
-		      cdev->private->devno, sch->irq, cdev->private->imask);
+		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
+		      cdev->private->devno, sch->schid.ssid,
+		      sch->schid.sch_no, cdev->private->imask);
 	return ret;
 }
 
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
 		if (irb->ecw[0] & SNS0_CMD_REJECT)
 			return -EOPNOTSUPP;
 		/* Hmm, whatever happened, try again. 
*/ - CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " + CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " + "cnt %02d, " "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", + cdev->private->ssid, cdev->private->devno, irb->esw.esw0.erw.scnt, irb->ecw[0], irb->ecw[1], irb->ecw[2], irb->ecw[3], @@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev) return -EAGAIN; } if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " - "%04x, lpm %02X, became 'not operational'\n", - cdev->private->devno, sch->irq, - cdev->private->imask); + CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," + " lpm %02X, became 'not operational'\n", + cdev->private->devno, sch->schid.ssid, + sch->schid.sch_no, cdev->private->imask); return -EACCES; } return 0; @@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) void ccw_device_verify_start(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); + cdev->private->flags.pgid_single = 0; cdev->private->iretry = 5; + /* + * Update sch->lpm with current values to catch paths becoming + * available again. + */ + if (stsch(sch->schid, &sch->schib)) { + ccw_device_verify_done(cdev, -ENODEV); + return; + } + sch->lpm = sch->schib.pmcw.pim & + sch->schib.pmcw.pam & + sch->schib.pmcw.pom & + sch->opm; __ccw_device_verify_start(cdev); } diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 12a24d4..db09c20 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " "received" - " ... device %04X on subchannel %04X, dev_stat " + " ... device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->devno, cdev->private->irq, + cdev->private->devno, cdev->private->ssid, + cdev->private->sch_no, irb->scsw.dstat, irb->scsw.cstat); if (irb->scsw.cc != 3) { char dbf_text[15]; - sprintf(dbf_text, "chk%x", cdev->private->irq); + sprintf(dbf_text, "chk%x", cdev->private->sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof (struct irb)); } @@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev) struct subchannel *sch; sch = to_subchannel(cdev->dev.parent); - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); - CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " - "not operational \n", __FUNCTION__, sch->irq, + CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " + "not operational \n", __FUNCTION__, + sch->schid.ssid, sch->schid.sch_no, sch->schib.pmcw.pnom); sch->lpm &= ~sch->schib.pmcw.pnom; diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 45480a2..95a9462 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -1,12 +1,13 @@ #ifndef S390_CIO_IOASM_H #define S390_CIO_IOASM_H +#include "schid.h" + /* * TPI info structure */ struct tpi_info { - __u32 reserved1 : 16; /* reserved 0x00000001 */ - __u32 irq : 16; /* aka. 
subchannel number */ + struct subchannel_id schid; __u32 intparm; /* interruption parameter */ __u32 adapter_IO : 1; __u32 reserved2 : 1; @@ -21,7 +22,8 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -static inline int stsch(int irq, volatile struct schib *addr) +static inline int stsch(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) + : "cc", "1" ); + return ccode; +} + +static inline int stsch_err(struct subchannel_id schid, + volatile struct schib *addr) +{ + int ccode; + + __asm__ __volatile__( + " lhi %0,%3\n" + " lr 1,%1\n" + " stsch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" +#ifdef CONFIG_64BIT + ".section __ex_table,\"a\"\n" + " .align 8\n" + " .quad 0b,1b\n" + ".previous" +#else + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 0b,1b\n" + ".previous" +#endif + : "=&d" (ccode) + : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int msch(int irq, volatile struct schib *addr) +static inline int msch(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int msch_err(int irq, volatile struct schib *addr) +static inline int msch_err(struct subchannel_id schid, + volatile struct schib *addr) { int ccode; @@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr) "0: ipm %0\n" " srl %0,28\n" "1:\n" -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 0b,1b\n" @@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr) ".previous" #endif : "=&d" (ccode) - : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO) + : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int tsch(int irq, volatile struct irb *addr) +static inline int tsch(struct subchannel_id schid, + volatile struct irb *addr) { int ccode; @@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } @@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "a" (addr) + : "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int ssch(int irq, volatile struct orb *addr) +static inline int ssch(struct subchannel_id schid, + volatile struct orb *addr) { int ccode; @@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L), "a" (addr) + : "d" (schid), "a" (addr), "m" (*addr) : "cc", "1" ); return ccode; } -static inline int rsch(int irq) +static inline int rsch(struct subchannel_id schid) { int ccode; @@ -133,12 +168,12 @@ static inline int rsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int csch(int irq) +static inline int csch(struct subchannel_id schid) { int ccode; @@ -148,12 +183,12 @@ static inline int csch(int irq) " 
ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int hsch(int irq) +static inline int hsch(struct subchannel_id schid) { int ccode; @@ -163,12 +198,12 @@ static inline int hsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } -static inline int xsch(int irq) +static inline int xsch(struct subchannel_id schid) { int ccode; @@ -178,21 +213,22 @@ static inline int xsch(int irq) " ipm %0\n" " srl %0,28" : "=d" (ccode) - : "d" (irq | 0x10000L) + : "d" (schid) : "cc", "1" ); return ccode; } static inline int chsc(void *chsc_area) { + typedef struct { char _[4096]; } addr_type; int cc; __asm__ __volatile__ ( - ".insn rre,0xb25f0000,%1,0 \n\t" + ".insn rre,0xb25f0000,%2,0 \n\t" "ipm %0 \n\t" "srl %0,28 \n\t" - : "=d" (cc) - : "d" (chsc_area) + : "=d" (cc), "=m" (*(addr_type *) chsc_area) + : "d" (chsc_area), "m" (*(addr_type *) chsc_area) : "cc" ); return cc; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index eb39218..30a836f 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -56,7 +56,7 @@ #include "ioasm.h" #include "chsc.h" -#define VERSION_QDIO_C "$Revision: 1.108 $" +#define VERSION_QDIO_C "$Revision: 1.114 $" /****************** MODULE PARAMETER VARIABLES ********************/ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); @@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats; #endif /* QDIO_PERFORMANCE_STATS */ static int hydra_thinints; +static int is_passthrough = 0; static int omit_svs; static int indicator_used[INDICATORS_PER_CACHELINE]; @@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q) atomic_dec(&q->use_count); } -static volatile inline void -qdio_set_slsb(volatile char *slsb, unsigned char value) +/*check ccq */ +static inline int +qdio_check_ccq(struct qdio_q *q, unsigned int ccq) +{ + char dbf_text[15]; + + if (ccq == 0 || ccq == 32 || ccq == 96) + return 0; + if (ccq == 97) + return 1; + /*notify devices immediately*/ + sprintf(dbf_text,"%d", ccq); + QDIO_DBF_TEXT2(1,trace,dbf_text); + return -EIO; +} +/* EQBS: extract buffer states */ +static inline int +qdio_do_eqbs(struct qdio_q *q, unsigned char *state, + unsigned int *start, unsigned int *cnt) +{ + struct qdio_irq *irq; + unsigned int tmp_cnt, q_no, ccq; + int rc ; + char dbf_text[15]; + + ccq = 0; + tmp_cnt = *cnt; + irq = (struct qdio_irq*)q->irq_ptr; + q_no = q->q_no; + if(!q->is_input_q) + q_no += irq->no_input_qs; + ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); + rc = qdio_check_ccq(q, ccq); + if (rc < 0) { + QDIO_DBF_TEXT2(1,trace,"eqberr"); + sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); + QDIO_DBF_TEXT2(1,trace,dbf_text); + q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| + QDIO_STATUS_LOOK_FOR_ERROR, + 0, 0, 0, -1, -1, q->int_parm); + return 0; + } + return (tmp_cnt - *cnt); +} + +/* SQBS: set buffer states */ +static inline int +qdio_do_sqbs(struct qdio_q *q, unsigned char state, + unsigned int *start, unsigned int *cnt) { - xchg((char*)slsb,value); + struct qdio_irq *irq; + unsigned int tmp_cnt, q_no, ccq; + int rc; + char dbf_text[15]; + + ccq = 0; + tmp_cnt = *cnt; + irq = (struct qdio_irq*)q->irq_ptr; + q_no = q->q_no; + if(!q->is_input_q) + q_no += irq->no_input_qs; + ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); + rc = qdio_check_ccq(q, ccq); + if (rc < 0) { + QDIO_DBF_TEXT3(1,trace,"sqberr"); + sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); + 
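/* mirror the eqbs error path: trace the counts, then notify the driver via its handler */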
QDIO_DBF_TEXT3(1,trace,dbf_text); + q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| + QDIO_STATUS_LOOK_FOR_ERROR, + 0, 0, 0, -1, -1, q->int_parm); + return 0; + } + return (tmp_cnt - *cnt); } +static inline int +qdio_set_slsb(struct qdio_q *q, unsigned int *bufno, + unsigned char state, unsigned int *count) +{ + volatile char *slsb; + struct qdio_irq *irq; + + irq = (struct qdio_irq*)q->irq_ptr; + if (!irq->is_qebsm) { + slsb = (char *)&q->slsb.acc.val[(*bufno)]; + xchg(slsb, state); + return 1; + } + return qdio_do_sqbs(q, state, bufno, count); +} + +#ifdef CONFIG_QDIO_DEBUG +static inline void +qdio_trace_slsb(struct qdio_q *q) +{ + if (q->queue_type==QDIO_TRACE_QTYPE) { + if (q->is_input_q) + QDIO_DBF_HEX2(0,slsb_in,&q->slsb, + QDIO_MAX_BUFFERS_PER_Q); + else + QDIO_DBF_HEX2(0,slsb_out,&q->slsb, + QDIO_MAX_BUFFERS_PER_Q); + } +} +#endif + +static inline int +set_slsb(struct qdio_q *q, unsigned int *bufno, + unsigned char state, unsigned int *count) +{ + int rc; +#ifdef CONFIG_QDIO_DEBUG + qdio_trace_slsb(q); +#endif + rc = qdio_set_slsb(q, bufno, state, count); +#ifdef CONFIG_QDIO_DEBUG + qdio_trace_slsb(q); +#endif + return rc; +} static inline int qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, unsigned int gpr3) @@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, perf_stats.siga_syncs++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_sync(q->irq, gpr2, gpr3); + cc = do_siga_sync(q->schid, gpr2, gpr3); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q) return qdio_siga_sync(q, q->mask, 0); } +static int +__do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +{ + struct qdio_irq *irq; + unsigned int fc = 0; + unsigned long schid; + + irq = (struct qdio_irq *) q->irq_ptr; + if (!irq->is_qebsm) + schid = *((u32 *)&q->schid); + else { + schid = irq->sch_token; + fc |= 0x80; + } + return do_siga_output(schid, q->mask, busy_bit, fc); +} + /* * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns * an access exception @@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q) QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); for (;;) { - cc = do_siga_output(q->irq, q->mask, &busy_bit); + cc = __do_siga_output(q, &busy_bit); //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { if (!start_time) @@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q) perf_stats.siga_ins++; #endif /* QDIO_PERFORMANCE_STATS */ - cc = do_siga_input(q->irq, q->mask); + cc = do_siga_input(q->schid, q->mask); if (cc) QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); @@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q) } /* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 volatile * +static __u32 * qdio_get_indicator(void) { int i; @@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr) atomic_dec(&spare_indicator_usecount); } -static inline volatile void +static inline void tiqdio_clear_summary_bit(__u32 *location) { QDIO_DBF_TEXT5(0,trace,"clrsummb"); @@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location) xchg(location,0); } -static inline volatile void +static inline void tiqdio_set_summary_bit(__u32 *location) { QDIO_DBF_TEXT5(0,trace,"setsummb"); @@ -336,7 +468,9 @@ static inline int qdio_stop_polling(struct qdio_q *q) { #ifdef QDIO_USE_PROCESSING_STATE - int gsf; + unsigned int tmp, gsf, count = 1; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; if (!atomic_swap(&q->polling,0)) return 1; @@ -348,17 +482,22 @@ 
qdio_stop_polling(struct qdio_q *q) if (!q->is_input_q) return 1; - gsf=GET_SAVED_FRONTIER(q); - set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1)], - SLSB_P_INPUT_NOT_INIT); + tmp = gsf = GET_SAVED_FRONTIER(q); + tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) ); + set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count); + /* * we don't issue this SYNC_MEMORY, as we trust Rick T and * moreover will not use the PROCESSING state under VM, so * q->polling was 0 anyway */ /*SYNC_MEMORY;*/ - if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED) + if (irq->is_qebsm) { + count = 1; + qdio_do_eqbs(q, &state, &gsf, &count); + } else + state = q->slsb.acc.val[gsf]; + if (state != SLSB_P_INPUT_PRIMED) return 1; /* * set our summary bit again, as otherwise there is a @@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void) /************************* OUTBOUND ROUTINES *******************************/ +static int +qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q) +{ + struct qdio_irq *irq; + unsigned char state; + unsigned int cnt, count, ftc; + + irq = (struct qdio_irq *) q->irq_ptr; + if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) + SYNC_MEMORY; + + ftc = q->first_to_check; + count = qdio_min(atomic_read(&q->number_of_buffers_used), + (QDIO_MAX_BUFFERS_PER_Q-1)); + if (count == 0) + return q->first_to_check; + cnt = qdio_do_eqbs(q, &state, &ftc, &count); + if (cnt == 0) + return q->first_to_check; + switch (state) { + case SLSB_P_OUTPUT_ERROR: + QDIO_DBF_TEXT3(0,trace,"outperr"); + atomic_sub(cnt , &q->number_of_buffers_used); + if (q->qdio_error) + q->error_status_flags |= + QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; + q->qdio_error = SLSB_P_OUTPUT_ERROR; + q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; + q->first_to_check = ftc; + break; + case SLSB_P_OUTPUT_EMPTY: + QDIO_DBF_TEXT5(0,trace,"outpempt"); + atomic_sub(cnt, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_CU_OUTPUT_PRIMED: + /* all buffers primed */ + QDIO_DBF_TEXT5(0,trace,"outpprim"); + break; + default: + break; + } + QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); + return q->first_to_check; +} + +static int +qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) +{ + struct qdio_irq *irq; + unsigned char state; + int tmp, ftc, count, cnt; + char dbf_text[15]; + + + irq = (struct qdio_irq *) q->irq_ptr; + ftc = q->first_to_check; + count = qdio_min(atomic_read(&q->number_of_buffers_used), + (QDIO_MAX_BUFFERS_PER_Q-1)); + if (count == 0) + return q->first_to_check; + cnt = qdio_do_eqbs(q, &state, &ftc, &count); + if (cnt == 0) + return q->first_to_check; + switch (state) { + case SLSB_P_INPUT_ERROR : +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT3(1,trace,"inperr"); + sprintf(dbf_text,"%2x,%2x",ftc,count); + QDIO_DBF_TEXT3(1,trace,dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + if (q->qdio_error) + q->error_status_flags |= + QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; + q->qdio_error = SLSB_P_INPUT_ERROR; + q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; + atomic_sub(cnt, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_P_INPUT_PRIMED : + QDIO_DBF_TEXT3(0,trace,"inptprim"); + sprintf(dbf_text,"%2x,%2x",ftc,count); + QDIO_DBF_TEXT3(1,trace,dbf_text); + tmp = 0; + ftc = q->first_to_check; +#ifdef QDIO_USE_PROCESSING_STATE + if (cnt > 1) { + cnt -= 1; + tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); + if (!tmp) + break; + } + cnt = 1; + tmp += set_slsb(q, &ftc, + SLSB_P_INPUT_PROCESSING, &cnt); + 
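/* note the PROCESSING buffer so qdio_stop_polling() can reset it later */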
atomic_set(&q->polling, 1); +#else + tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); +#endif + atomic_sub(tmp, &q->number_of_buffers_used); + q->first_to_check = ftc; + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_PROCESSING: + QDIO_DBF_TEXT5(0,trace,"inpnipro"); + break; + default: + break; + } + QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); + return q->first_to_check; +} static inline int qdio_get_outbound_buffer_frontier(struct qdio_q *q) { - int f,f_mod_no; - volatile char *slsb; - int first_not_to_check; + struct qdio_irq *irq; + volatile char *slsb; + unsigned int count = 1; + int first_not_to_check, f, f_mod_no; char dbf_text[15]; QDIO_DBF_TEXT4(0,trace,"getobfro"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); + irq = (struct qdio_irq *) q->irq_ptr; + if (irq->is_qebsm) + return qdio_qebsm_get_outbound_buffer_frontier(q); + slsb=&q->slsb.acc.val[0]; f_mod_no=f=q->first_to_check; /* @@ -484,7 +741,7 @@ check_next: QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); /* kind of process the buffer */ - set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count); /* * we increment the frontier, as this buffer @@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q) result=qdio_siga_output(q); - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ + switch (result) { + case 0: + /* went smooth this time, reset timestamp */ #ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); + QDIO_DBF_TEXT3(0,trace,"cc2reslv"); + sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, + atomic_read(&q->busy_siga_counter)); + QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; + q->timing.busy_start=0; + break; + case (2|QDIO_SIGA_ERROR_B_BIT_SET): + /* cc=2 and busy bit: */ + atomic_inc(&q->busy_siga_counter); + + /* if the last siga was successful, save + * timestamp here */ + if (!q->timing.busy_start) + q->timing.busy_start=NOW; + + /* if we're in time, don't touch error_status_flags + * and siga_error */ + if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { + qdio_mark_q(q); break; - case (2|QDIO_SIGA_ERROR_B_BIT_SET): - /* cc=2 and busy bit: */ - atomic_inc(&q->busy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { - qdio_mark_q(q); - break; - } - QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); + } + QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); #ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); + sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, + atomic_read(&q->busy_siga_counter)); + QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; + /* else fallthrough and report error */ + default: + /* for plain cc=1, 2 or 3: */ + if (q->siga_error) q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } + QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; + q->error_status_flags|= + QDIO_STATUS_LOOK_FOR_ERROR; + 
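/* keep the failing cc; a still-set siga_error above means a repeated error */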
q->siga_error=result; + } } static inline void @@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q) static inline int qdio_get_inbound_buffer_frontier(struct qdio_q *q) { + struct qdio_irq *irq; int f,f_mod_no; volatile char *slsb; + unsigned int count = 1; int first_not_to_check; #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; @@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q) QDIO_DBF_TEXT4(0,trace,"getibfro"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); + irq = (struct qdio_irq *) q->irq_ptr; + if (irq->is_qebsm) + return qdio_qebsm_get_inbound_buffer_frontier(q); + slsb=&q->slsb.acc.val[0]; f_mod_no=f=q->first_to_check; /* @@ -792,19 +1055,19 @@ check_next: * kill VM in terms of CP overhead */ if (q->siga_sync) { - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); } else { /* set the previous buffer to NOT_INIT. The current * buffer will be set to PROCESSING at the end of * this function to avoid further interrupts. */ if (last_position>=0) - set_slsb(&slsb[last_position], - SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &last_position, + SLSB_P_INPUT_NOT_INIT, &count); atomic_set(&q->polling,1); last_position=f_mod_no; } #else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); #endif /* QDIO_USE_PROCESSING_STATE */ /* * not needed, as the inbound queue will be synced on the next @@ -829,7 +1092,7 @@ check_next: QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); /* kind of process the buffer */ - set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); + set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); if (q->qdio_error) q->error_status_flags|= @@ -857,7 +1120,7 @@ out: #ifdef QDIO_USE_PROCESSING_STATE if (last_position>=0) - set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING); + set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); #endif /* QDIO_USE_PROCESSING_STATE */ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); @@ -902,6 +1165,10 @@ static inline int tiqdio_is_inbound_q_done(struct qdio_q *q) { int no_used; + unsigned int start_buf, count; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; #endif @@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) if (!q->siga_sync) /* we'll check for more primed buffers in qeth_stop_polling */ return 0; - - if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED) + if (irq->is_qebsm) { + count = 1; + start_buf = q->first_to_check; + qdio_do_eqbs(q, &state, &start_buf, &count); + } else + state = q->slsb.acc.val[q->first_to_check]; + if (state != SLSB_P_INPUT_PRIMED) /* * nothing more to do, if next buffer is not PRIMED. 
* note that we did a SYNC_MEMORY before, that there @@ -955,6 +1227,10 @@ static inline int qdio_is_inbound_q_done(struct qdio_q *q) { int no_used; + unsigned int start_buf, count; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + #ifdef CONFIG_QDIO_DEBUG char dbf_text[15]; #endif @@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q) QDIO_DBF_TEXT4(0,trace,dbf_text); return 1; } - - if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { + if (irq->is_qebsm) { + count = 1; + start_buf = q->first_to_check; + qdio_do_eqbs(q, &state, &start_buf, &count); + } else + state = q->slsb.acc.val[q->first_to_check]; + if (state == SLSB_P_INPUT_PRIMED) { /* we got something to do */ QDIO_DBF_TEXT4(0,trace,"inqisntA"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); @@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, void *ptr; int available; - sprintf(dbf_text,"qfqs%4x",cdev->private->irq); + sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); for (i=0;i<no_input_qs;i++) { q=irq_ptr->input_qs[i]; @@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, q->queue_type=q_format; q->int_parm=int_parm; - q->irq=irq_ptr->irq; + q->schid = irq_ptr->schid; q->irq_ptr = irq_ptr; q->cdev = cdev; q->mask=1<<(31-i); @@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); /* fill in slsb */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { - set_slsb(&q->slsb.acc.val[j], - SLSB_P_INPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } + if (!irq_ptr->is_qebsm) { + unsigned int count = 1; + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count); + } } for (i=0;i<no_output_qs;i++) { @@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, q->queue_type=q_format; q->int_parm=int_parm; q->is_input_q=0; - q->irq=irq_ptr->irq; + q->schid = irq_ptr->schid; q->cdev = cdev; q->irq_ptr = irq_ptr; q->mask=1<<(31-i); @@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); /* fill in slsb */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { - set_slsb(&q->slsb.acc.val[j], - SLSB_P_OUTPUT_NOT_INIT); -/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ - } + if (!irq_ptr->is_qebsm) { + unsigned int count = 1; + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count); + } } } @@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) char dbf_text[15]; QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); + sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state); QDIO_DBF_TEXT5(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ @@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) } static inline void -qdio_irq_check_sense(int irq, struct irb *irb) +qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) { char dbf_text[15]; if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",irq); + sprintf(dbf_text,"sens%4x",schid.sch_no); QDIO_DBF_TEXT2(1,trace,dbf_text); QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); @@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev) switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", - irq_ptr->irq); + 
QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1,setup,"eq:timeo"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", - irq_ptr->irq); + QDIO_PRINT_INFO("Did not get interrupt on cleanup, " + "irq=0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); break; case QDIO_IRQ_STATE_ESTABLISHED: case QDIO_IRQ_STATE_ACTIVE: /* I/O has been terminated by common I/O layer. */ - QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", - irq_ptr->irq); + QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1, trace, "cio:term"); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); if (get_device(&cdev->dev)) { @@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } } - qdio_irq_check_sense(irq_ptr->irq, irb); + qdio_irq_check_sense(irq_ptr->schid, irb); #ifdef CONFIG_QDIO_DEBUG sprintf(dbf_text, "state:%d", irq_ptr->state); @@ -1905,7 +2187,7 @@ int qdio_synchronize(struct ccw_device *cdev, unsigned int flags, unsigned int queue_number) { - int cc; + int cc = 0; struct qdio_q *q; struct qdio_irq *irq_ptr; void *ptr; @@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return -ENODEV; #ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->irq; + *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no; QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); *((int*)(&dbf_text[0]))=flags; *((int*)(&dbf_text[4]))=queue_number; @@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, q=irq_ptr->input_qs[queue_number]; if (!q) return -EINVAL; - cc = do_siga_sync(q->irq, 0, q->mask); + if (!(irq_ptr->is_qebsm)) + cc = do_siga_sync(q->schid, 0, q->mask); } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { q=irq_ptr->output_qs[queue_number]; if (!q) return -EINVAL; - cc = do_siga_sync(q->irq, q->mask, 0); + if (!(irq_ptr->is_qebsm)) + cc = do_siga_sync(q->schid, q->mask, 0); } else return -EINVAL; @@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return cc; } -static unsigned char -qdio_check_siga_needs(int sch) +static inline void +qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, + unsigned long token) +{ + struct qdio_q *q; + int i; + unsigned int count, start_buf; + char dbf_text[15]; + + /*check if QEBSM is disabled */ + if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) { + irq_ptr->is_qebsm = 0; + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + QDIO_DBF_TEXT0(0,setup,"noV=V"); + return; + } + irq_ptr->sch_token = token; + /*input queue*/ + for (i = 0; i < irq_ptr->no_input_qs;i++) { + q = irq_ptr->input_qs[i]; + count = QDIO_MAX_BUFFERS_PER_Q; + start_buf = 0; + set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count); + } + sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm); + QDIO_DBF_TEXT0(0,setup,dbf_text); + sprintf(dbf_text,"%8lx",irq_ptr->sch_token); + QDIO_DBF_TEXT0(0,setup,dbf_text); + /*output queue*/ + for (i = 0; i < irq_ptr->no_output_qs; i++) { + q = irq_ptr->output_qs[i]; + count = QDIO_MAX_BUFFERS_PER_Q; + start_buf = 0; + set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count); + } +} + +static void +qdio_get_ssqd_information(struct qdio_irq *irq_ptr) { int result; unsigned char qdioac; - struct { struct chsc_header request; - u16 
reserved1; + u16 reserved1:10; + u16 ssid:2; + u16 fmt:4; u16 first_sch; u16 reserved2; u16 last_sch; @@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch) u8 reserved5; u16 sch; u8 qfmt; - u8 reserved6; - u8 qdioac; + u8 parm; + u8 qdioac1; u8 sch_class; u8 reserved7; u8 icnt; u8 reserved8; u8 ocnt; + u8 reserved9; + u8 mbccnt; + u16 qdioac2; + u64 sch_token; } *ssqd_area; + QDIO_DBF_TEXT0(0,setup,"getssqd"); + qdioac = 0; ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!ssqd_area) { QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ - "SIGAs for sch x%x.\n", sch); - return CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); + irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || + CHSC_FLAG_SIGA_OUTPUT_NECESSARY || + CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + return; } + ssqd_area->request = (struct chsc_header) { .length = 0x0010, .code = 0x0024, }; - - ssqd_area->first_sch = sch; - ssqd_area->last_sch = sch; - - result=chsc(ssqd_area); + ssqd_area->first_sch = irq_ptr->schid.sch_no; + ssqd_area->last_sch = irq_ptr->schid.sch_no; + ssqd_area->ssid = irq_ptr->schid.ssid; + result = chsc(ssqd_area); if (result) { QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ - "SIGAs for sch x%x.\n", - result,sch); + "SIGAs for sch 0.%x.%x.\n", result, + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; goto out; } if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { QDIO_PRINT_WARN("response upon checking SIGA needs " \ - "is 0x%x. Using all SIGAs for sch x%x.\n", - ssqd_area->response.code, sch); + "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n", + ssqd_area->response.code, + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || CHSC_FLAG_SIGA_OUTPUT_NECESSARY || CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ + irq_ptr->is_qebsm = 0; goto out; } if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || - (ssqd_area->sch != sch)) { - QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ - "using all SIGAs.\n",sch); + (ssqd_area->sch != irq_ptr->schid.sch_no)) { + QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \ + "using all SIGAs.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ + irq_ptr->is_qebsm = 0; goto out; } - - qdioac = ssqd_area->qdioac; + qdioac = ssqd_area->qdioac1; out: + qdio_check_subchannel_qebsm(irq_ptr, qdioac, + ssqd_area->sch_token); free_page ((unsigned long) ssqd_area); - return qdioac; + irq_ptr->qdioac = qdioac; } static unsigned int @@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void) sprintf(dbf_text,"hydrati%1x", hydra_thinints); QDIO_DBF_TEXT0(0,setup,dbf_text); +#ifdef CONFIG_64BIT + /* Check for QEBSM support in general (bit 58). */ + is_passthrough = css_general_characteristics.qebsm; +#endif + sprintf(dbf_text,"cssQBS:%1x", is_passthrough); + QDIO_DBF_TEXT0(0,setup,dbf_text); + /* Check for aif time delay disablement fac (bit 56). 
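The flag comes from css_general_characteristics, like the QEBSM bit checked just above.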
If installed, * omit svs even under lpar (good point by rick again) */ omit_svs = css_general_characteristics.aif_tdd; @@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) /* set to 0x10000000 to enable * time delay disablement facility */ u32 reserved5; - u32 subsystem_id; + struct subchannel_id schid; u32 reserved6[1004]; struct chsc_header response; u32 reserved7; @@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scssc_area) { QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel x%x.\n", irq_ptr->irq); + "subchannel 0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); return -ENOMEM; } scssc_area->request = (struct chsc_header) { @@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) scssc_area->ks = QDIO_STORAGE_KEY; scssc_area->kc = QDIO_STORAGE_KEY; scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; + scssc_area->schid = irq_ptr->schid; /* enables the time delay disablement facility. Don't care * whether it is really there (i.e. we haven't checked for * it) */ @@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) QDIO_PRINT_WARN("Time delay disablement facility " \ "not available\n"); - - result = chsc(scssc_area); if (result) { - QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ - "cc=%i.\n",irq_ptr->irq,result); + QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \ + "cc=%i.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result); result = -EIO; goto out; } @@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scsscf_area) { QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel x%x.\n", irq_ptr->irq); + "subchannel 0.%x.%x.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); return -ENOMEM; } scsscf_area->request = (struct chsc_header) { @@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) result=chsc(scsscf_area); if (result) { - QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ - "cc=%i. Continuing.\n",irq_ptr->irq,result); + QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \ + "cc=%i. 
Continuing.\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + result); result = -EIO; goto out; } @@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how) if (!irq_ptr) return -ENODEV; - sprintf(dbf_text,"qcln%4x",irq_ptr->irq); + sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how) down(&irq_ptr->setting_up_sema); - sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); + sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev) down(&irq_ptr->setting_up_sema); - sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); + sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT0(0,setup,dbf_text); @@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, irq_ptr = cdev->private->qdio_data; if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->irq); + sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1,trace,dbf_text); QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0x%x (cs=x%x, ds=x%x).\n", - irq_ptr->irq,cstat,dstat); + "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + cstat,dstat); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); } @@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_TEXT2(1,setup,"eq:no de"); QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " + QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get " "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->irq, dstat, cstat); + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, QDIO_DBF_TEXT2(1,setup,"eq:badio"); QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq %04x: got " + QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got " "the following devstat: dstat=%02x, " - "cstat=%02x\n", - irq_ptr->irq, dstat, cstat); + "cstat=%02x\n", irq_ptr->schid.ssid, + irq_ptr->schid.sch_no, dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); return 1; } @@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text,"qehi%4x",cdev->private->irq); + sprintf(dbf_text,"qehi%4x",cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data) int rc; char dbf_text[15]; - sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); + sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data) struct qdio_irq *irq_ptr; char dbf_text[15]; - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); + sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no); 
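/* debug tags now carry the subchannel number, which is unique only per ssid */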
QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || @@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data) irq_ptr->int_parm=init_data->int_parm; - irq_ptr->irq = init_data->cdev->private->irq; + irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); irq_ptr->no_input_qs=init_data->no_input_qs; irq_ptr->no_output_qs=init_data->no_output_qs; @@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data) QDIO_DBF_TEXT2(0,setup,dbf_text); if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind=qdio_get_indicator(); + irq_ptr->dev_st_chg_ind = qdio_get_indicator(); QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); if (!irq_ptr->dev_st_chg_ind) { QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0x%x\n",irq_ptr->irq); + "for irq 0.%x.%x\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no); qdio_release_irq_memory(irq_ptr); return -ENOBUFS; } @@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data) irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; /* fill in qib */ + irq_ptr->is_qebsm = is_passthrough; + if (irq_ptr->is_qebsm) + irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + irq_ptr->qib.qfmt=init_data->q_format; if (init_data->no_input_qs) irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); @@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data) tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); } - sprintf(dbf_text,"qest%4x",cdev->private->irq); + sprintf(dbf_text,"qest%4x",cdev->private->sch_no); QDIO_DBF_TEXT0(0,setup,dbf_text); QDIO_DBF_TEXT0(0,trace,dbf_text); @@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data) sprintf(dbf_text,"eq:io%4x",result); QDIO_DBF_TEXT2(1,setup,dbf_text); } - QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); + QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \ + "returned %i, next try returned %i\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + result, result2); result=result2; if (result) ccw_device_set_timeout(cdev, 0); @@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data) return -EIO; } - irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); + qdio_get_ssqd_information(irq_ptr); /* if this gets set once, we're running under VM and can omit SVSes */ if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) omit_svs=1; @@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags) goto out; } - sprintf(dbf_text,"qact%4x", irq_ptr->irq); + sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(0,setup,dbf_text); QDIO_DBF_TEXT2(0,trace,dbf_text); @@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags) sprintf(dbf_text,"aq:io%4x",result); QDIO_DBF_TEXT2(1,setup,dbf_text); } - QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->irq,result,result2); + QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \ + "returned %i, next try returned %i\n", + irq_ptr->schid.ssid, irq_ptr->schid.sch_no, + result, result2); result=result2; } @@ -3015,30 +3373,40 @@ static inline void qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); + if (irq->is_qebsm) { + while (count) + set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); + 
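/* under QEBSM, set_slsb/SQBS consumes count in chunks and advances qidx itself */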
return; + } for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); + set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); count--; if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); + qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); } - - /* not necessary, as the queues are synced during the SIGA read */ - /*SYNC_MEMORY;*/ } static inline void qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; + + qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); + if (irq->is_qebsm) { + while (count) + set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); + return; + } + for (;;) { - set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); + set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); count--; if (!count) break; - qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); + qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); } - - /* SIGA write will sync the queues */ - /*SYNC_MEMORY;*/ } static inline void @@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, struct qdio_buffer *buffers) { int used_elements; + unsigned int cnt, start_buf; + unsigned char state = 0; + struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; /* This is the outbound handling of queues */ #ifdef QDIO_PERFORMANCE_STATS @@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, * SYNC_MEMORY :-/ ), we try to * fast-requeue buffers */ - if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1)]!= - SLSB_CU_OUTPUT_PRIMED) { + if (irq->is_qebsm) { + cnt = 1; + start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) & + (QDIO_MAX_BUFFERS_PER_Q-1)); + qdio_do_eqbs(q, &state, &start_buf, &cnt); + } else + state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) + &(QDIO_MAX_BUFFERS_PER_Q-1) ]; + if (state != SLSB_CU_OUTPUT_PRIMED) { qdio_kick_outbound_q(q); } else { QDIO_DBF_TEXT3(0,trace, "fast-req"); @@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, #ifdef CONFIG_QDIO_DEBUG char dbf_text[20]; - sprintf(dbf_text,"doQD%04x",cdev->private->irq); + sprintf(dbf_text,"doQD%04x",cdev->private->sch_no); QDIO_DBF_TEXT3(0,trace,dbf_text); #endif /* CONFIG_QDIO_DEBUG */ diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 328e31c..fa385e7 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -3,14 +3,15 @@ #include <asm/page.h> -#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" +#include "schid.h" + +#define VERSION_CIO_QDIO_H "$Revision: 1.40 $" #ifdef CONFIG_QDIO_DEBUG #define QDIO_VERBOSE_LEVEL 9 #else /* CONFIG_QDIO_DEBUG */ #define QDIO_VERBOSE_LEVEL 5 #endif /* CONFIG_QDIO_DEBUG */ - #define QDIO_USE_PROCESSING_STATE #ifdef CONFIG_QDIO_PERF_STATS @@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ /* * Some instructions as assembly */ + +static inline int +do_sqbs(unsigned long sch, unsigned char state, int queue, + unsigned int *start, unsigned int *count) +{ +#ifdef CONFIG_64BIT + register unsigned long _ccq asm ("0") = *count; + register unsigned long _sch asm ("1") = sch; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + + asm volatile ( + " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_sch) + : "memory", "cc" + ); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + + return (_ccq >> 32) & 0xff; +#else + return 0; +#endif +} + +static inline int 
+do_eqbs(unsigned long sch, unsigned char *state, int queue, + unsigned int *start, unsigned int *count) +{ +#ifdef CONFIG_64BIT + register unsigned long _ccq asm ("0") = *count; + register unsigned long _sch asm ("1") = sch; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + unsigned long _state = 0; + + asm volatile ( + " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" + : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) + : "d" (_sch) + : "memory", "cc" + ); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; + *state = _state & 0xff; + + return (_ccq >> 32) & 0xff; +#else + return 0; +#endif +} + + static inline int -do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) +do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) { int cc; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,2 \n\t" "lr 1,%1 \n\t" @@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), "d" (mask2) + : "d" (schid), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 0,2 \n\t" "llgfr 1,%1 \n\t" @@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask1), "d" (mask2) + : "d" (schid), "d" (mask1), "d" (mask2) : "cc", "0", "1", "2", "3" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return cc; } static inline int -do_siga_input(unsigned int irq, unsigned int mask) +do_siga_input(struct subchannel_id schid, unsigned int mask) { int cc; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,1 \n\t" "lr 1,%1 \n\t" @@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) + : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 0,1 \n\t" "llgfr 1,%1 \n\t" @@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask) "ipm %0 \n\t" "srl %0,28 \n\t" : "=d" (cc) - : "d" (0x10000|irq), "d" (mask) + : "d" (schid), "d" (mask) : "cc", "0", "1", "2", "memory" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return cc; } static inline int -do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) +do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, + unsigned int fc) { int cc; __u32 busy_bit; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 0,0 \n\t" "lr 1,%2 \n\t" @@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) ".long 0b,2b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), + : "d" (schid), "d" (mask), "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) : "cc", "0", "1", "2", "memory" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( - "lghi 0,0 \n\t" - "llgfr 1,%2 \n\t" + "llgfr 0,%5 \n\t" + "lgr 1,%2 \n\t" "llgfr 2,%3 \n\t" "siga 0 \n\t" "0:" @@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) ".quad 0b,1b \n\t" ".previous \n\t" : "=d" (cc), "=d" (busy_bit) - : "d" (0x10000|irq), "d" (mask), - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) + : "d" (schid), "d" (mask), + "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) : "cc", "0", "1", "2", "memory" ); -#endif /* 
CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ (*bb) = busy_bit; return cc; @@ -407,21 +461,21 @@ do_clear_global_summary(void) unsigned long time; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT asm volatile ( "lhi 1,3 \n\t" ".insn rre,0xb2650000,2,0 \n\t" "lr %0,3 \n\t" : "=d" (time) : : "cc", "1", "2", "3" ); -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ asm volatile ( "lghi 1,3 \n\t" ".insn rre,0xb2650000,2,0 \n\t" "lgr %0,3 \n\t" : "=d" (time) : : "cc", "1", "2", "3" ); -#endif /* CONFIG_ARCH_S390X */ +#endif /* CONFIG_64BIT */ return time; } @@ -488,42 +542,21 @@ struct qdio_perf_stats { #define MY_MODULE_STRING(x) #x -#ifdef CONFIG_ARCH_S390X +#ifdef CONFIG_64BIT #define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_ARCH_S390X */ +#else /* CONFIG_64BIT */ #define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_ARCH_S390X */ - -#ifdef CONFIG_QDIO_DEBUG -#define set_slsb(x,y) \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } \ - qdio_set_slsb(x,y); \ - if(q->queue_type==QDIO_TRACE_QTYPE) { \ - if(q->is_input_q) { \ - QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } else { \ - QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \ - } \ - } -#else /* CONFIG_QDIO_DEBUG */ -#define set_slsb(x,y) qdio_set_slsb(x,y) -#endif /* CONFIG_QDIO_DEBUG */ +#endif /* CONFIG_64BIT */ struct qdio_q { volatile struct slsb slsb; char unused[QDIO_MAX_BUFFERS_PER_Q]; - __u32 * volatile dev_st_chg_ind; + __u32 * dev_st_chg_ind; int is_input_q; - int irq; + struct subchannel_id schid; struct ccw_device *cdev; unsigned int is_iqdio_q; @@ -568,6 +601,7 @@ struct qdio_q { struct tasklet_struct tasklet; #endif /* QDIO_USE_TIMERS_FOR_POLLING */ + enum qdio_irq_states state; /* used to store the error condition during a data transfer */ @@ -617,13 +651,17 @@ struct qdio_irq { __u32 * volatile dev_st_chg_ind; unsigned long int_parm; - int irq; + struct subchannel_id schid; unsigned int is_iqdio_irq; unsigned int is_thinint_irq; unsigned int hydra_gives_outbound_pcis; unsigned int sync_done_on_outb_pcis; + /* QEBSM facility */ + unsigned int is_qebsm; + unsigned long sch_token; + enum qdio_irq_states state; unsigned int no_input_qs; diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h new file mode 100644 index 0000000..54328fe --- /dev/null +++ b/drivers/s390/cio/schid.h @@ -0,0 +1,26 @@ +#ifndef S390_SCHID_H +#define S390_SCHID_H + +struct subchannel_id { + __u32 reserved:13; + __u32 ssid:2; + __u32 one:1; + __u32 sch_no:16; +} __attribute__ ((packed,aligned(4))); + + +/* Helper function for sane state of pre-allocated subchannel_id. 
*/ +static inline void +init_subchannel_id(struct subchannel_id *schid) +{ + memset(schid, 0, sizeof(struct subchannel_id)); + schid->one = 1; +} + +static inline int +schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) +{ + return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); +} + +#endif /* S390_SCHID_H */ diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h index e319e78..f87c785 100644 --- a/drivers/s390/crypto/z90common.h +++ b/drivers/s390/crypto/z90common.h @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90common.h * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -91,12 +91,13 @@ enum hdstat { #define TSQ_FATAL_ERROR 34 #define RSQ_FATAL_ERROR 35 -#define Z90CRYPT_NUM_TYPES 5 +#define Z90CRYPT_NUM_TYPES 6 #define PCICA 0 #define PCICC 1 #define PCIXCC_MCL2 2 #define PCIXCC_MCL3 3 #define CEX2C 4 +#define CEX2A 5 #define NILDEV -1 #define ANYDEV -1 #define PCIXCC_UNK -2 @@ -105,7 +106,7 @@ enum hdevice_type { PCICC_HW = 3, PCICA_HW = 4, PCIXCC_HW = 5, - OTHER_HW = 6, + CEX2A_HW = 6, CEX2C_HW = 7 }; diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h index 0a3bb5a..3a18443 100644 --- a/drivers/s390/crypto/z90crypt.h +++ b/drivers/s390/crypto/z90crypt.h @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90crypt.h * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -29,11 +29,11 @@ #include <linux/ioctl.h> -#define VERSION_Z90CRYPT_H "$Revision: 1.11 $" +#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $" #define z90crypt_VERSION 1 #define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards -#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support +#define z90crypt_VARIANT 3 // 3 = CEX2A support /** * struct ica_rsa_modexpo @@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt { * Z90STAT_CEX2CCOUNT * Return an integer count of all CEX2Cs. * + * Z90STAT_CEX2ACOUNT + * Return an integer count of all CEX2As. + * * Z90STAT_REQUESTQ_COUNT * Return an integer count of the number of entries waiting to be * sent to a device. 
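
The new count query follows the pattern of the existing Z90STAT_* ioctls: open the crypt device node and read an integer back. A minimal userspace sketch, assuming the driver's usual /dev/z90crypt misc-device node and that the ioctl definitions from z90crypt.h are visible to userspace; everything here besides Z90STAT_CEX2ACOUNT itself is illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "z90crypt.h"	/* provides Z90STAT_CEX2ACOUNT */

	int main(void)
	{
		int fd, count = 0;

		fd = open("/dev/z90crypt", O_RDONLY);
		if (fd < 0)
			return 1;
		/* declared with _IOR, so the kernel writes the count into 'count' */
		if (ioctl(fd, Z90STAT_CEX2ACOUNT, &count) == 0)
			printf("CEX2A devices online: %d\n", count);
		close(fd);
		return 0;
	}

Because the command is declared with _IOR, the ioctl fills in the int directly; no separate read is needed.
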
@@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt { * 0x03: PCIXCC_MCL2 * 0x04: PCIXCC_MCL3 * 0x05: CEX2C + * 0x06: CEX2A * 0x0d: device is disabled via the proc filesystem * * Z90STAT_QDEPTH_MASK @@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt { #define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) #define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) #define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) +#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int) #define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) #define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) #define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c index c215e08..d7f7494 100644 --- a/drivers/s390/crypto/z90hardware.c +++ b/drivers/s390/crypto/z90hardware.c @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90hardware.c * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = { #define RESPONSE_CPRB_SIZE 0x000006B8 #define RESPONSE_CPRBX_SIZE 0x00000724 +struct type50_hdr { + u8 reserved1; + u8 msg_type_code; + u16 msg_len; + u8 reserved2; + u8 ignored; + u16 reserved3; +}; + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg)) +#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg)) +#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg)) +#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg)) + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 + +struct type50_meb1_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 exponent[128]; + u8 modulus[128]; + u8 message[128]; +}; + +struct type50_meb2_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 exponent[256]; + u8 modulus[256]; + u8 message[256]; +}; + +struct type50_crb1_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 p[64]; + u8 q[64]; + u8 dp[64]; + u8 dq[64]; + u8 u[64]; + u8 message[128]; +}; + +struct type50_crb2_msg { + struct type50_hdr header; + u16 keyblock_type; + u8 reserved[6]; + u8 p[128]; + u8 q[128]; + u8 dp[128]; + u8 dq[128]; + u8 u[128]; + u8 message[256]; +}; + +union type50_msg { + struct type50_meb1_msg meb1; + struct type50_meb2_msg meb2; + struct type50_crb1_msg crb1; + struct type50_crb2_msg crb2; +}; + +struct type80_hdr { + u8 reserved1; + u8 type; + u16 len; + u8 code; + u8 reserved2[3]; + u8 reserved3[8]; +}; + +#define TYPE80_RSP_CODE 0x80 + struct error_hdr { unsigned char reserved1; unsigned char type; @@ -657,6 +738,7 @@ struct error_hdr { }; #define TYPE82_RSP_CODE 0x82 +#define TYPE88_RSP_CODE 0x88 #define REP82_ERROR_MACHINE_FAILURE 0x10 #define REP82_ERROR_PREEMPT_FAILURE 0x12 @@ -679,6 +761,22 @@ struct error_hdr { #define REP82_ERROR_PACKET_TRUNCATED 0xA0 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 +#define REP88_ERROR_MODULE_FAILURE 0x10 +#define REP88_ERROR_MODULE_TIMEOUT 0x11 +#define REP88_ERROR_MODULE_NOTINIT 0x13 +#define REP88_ERROR_MODULE_NOTAVAIL 0x14 +#define REP88_ERROR_MODULE_DISABLED 0x15 +#define REP88_ERROR_MODULE_IN_DIAGN 0x17 +#define REP88_ERROR_FASTPATH_DISABLD 0x19 +#define REP88_ERROR_MESSAGE_TYPE 0x20 +#define REP88_ERROR_MESSAGE_MALFORMD 0x22 
+#define REP88_ERROR_MESSAGE_LENGTH 0x23 +#define REP88_ERROR_RESERVED_FIELD 0x24 +#define REP88_ERROR_KEY_TYPE 0x34 +#define REP88_ERROR_INVALID_KEY 0x82 +#define REP88_ERROR_OPERAND 0x84 +#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 + #define CALLER_HEADER 12 static inline int @@ -687,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%4 \n" " slgr 1,1 \n" " lgr 2,1 \n" @@ -757,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%2 \n" " lghi 1,1 \n" " sll 1,24 \n" @@ -823,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat) int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" lgr 6,%3 \n" " llgfr 7,%2 \n" " llgt 0,0(6) \n" @@ -902,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id, int ccode; asm volatile -#ifdef __s390x__ +#ifdef CONFIG_64BIT (" llgfr 0,%2 \n" " lgr 3,%4 \n" " lgr 6,%3 \n" @@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type) stat = HD_ONLINE; *q_depth = t_depth + 1; switch (t_dev_type) { - case OTHER_HW: - stat = HD_NOT_THERE; - *dev_type = NILDEV; - break; case PCICA_HW: *dev_type = PCICA; break; @@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type) case CEX2C_HW: *dev_type = CEX2C; break; + case CEX2A_HW: + *dev_type = CEX2A; + break; default: *dev_type = NILDEV; break; @@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx, return 0; } +static int +ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p, + union type50_msg *z90cMsg_p) +{ + int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len; + unsigned char *mod_tgt, *exp_tgt, *inp_tgt; + union type50_msg *tmp_type50_msg; + + mod_len = icaMex_p->inputdatalength; + + msg_size = ((mod_len <= 128) ? 
TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) + + CALLER_HEADER; + + memset(z90cMsg_p, 0, msg_size); + + tmp_type50_msg = (union type50_msg *) + ((unsigned char *) z90cMsg_p + CALLER_HEADER); + + tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE; + + if (mod_len <= 128) { + tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN; + tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT; + mod_tgt = tmp_type50_msg->meb1.modulus; + mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus); + exp_tgt = tmp_type50_msg->meb1.exponent; + exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent); + inp_tgt = tmp_type50_msg->meb1.message; + inp_tgt_len = sizeof(tmp_type50_msg->meb1.message); + } else { + tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN; + tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT; + mod_tgt = tmp_type50_msg->meb2.modulus; + mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus); + exp_tgt = tmp_type50_msg->meb2.exponent; + exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent); + inp_tgt = tmp_type50_msg->meb2.message; + inp_tgt_len = sizeof(tmp_type50_msg->meb2.message); + } + + mod_tgt += (mod_tgt_len - mod_len); + if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len)) + return SEN_RELEASED; + if (is_empty(mod_tgt, mod_len)) + return SEN_USER_ERROR; + exp_tgt += (exp_tgt_len - mod_len); + if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len)) + return SEN_RELEASED; + if (is_empty(exp_tgt, mod_len)) + return SEN_USER_ERROR; + inp_tgt += (inp_tgt_len - mod_len); + if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len)) + return SEN_RELEASED; + if (is_empty(inp_tgt, mod_len)) + return SEN_USER_ERROR; + + *z90cMsg_l_p = msg_size - CALLER_HEADER; + + return 0; +} + +static int +ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, + int *z90cMsg_l_p, union type50_msg *z90cMsg_p) +{ + int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len, + dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset; + unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt, + temp[8]; + union type50_msg *tmp_type50_msg; + + mod_len = icaMsg_p->inputdatalength; + short_len = mod_len / 2; + long_len = mod_len / 2 + 8; + long_offset = 0; + + if (long_len > 128) { + memset(temp, 0x00, sizeof(temp)); + if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128)) + return SEN_RELEASED; + if (!is_empty(temp, 8)) + return SEN_NOT_AVAIL; + long_offset = long_len - 128; + long_len = 128; + } + + tmp_size = ((mod_len <= 128) ? 
TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + + CALLER_HEADER; + + memset(z90cMsg_p, 0, tmp_size); + + tmp_type50_msg = (union type50_msg *) + ((unsigned char *) z90cMsg_p + CALLER_HEADER); + + tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE; + if (long_len <= 64) { + tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN; + tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT; + p_tgt = tmp_type50_msg->crb1.p; + p_tgt_len = sizeof(tmp_type50_msg->crb1.p); + q_tgt = tmp_type50_msg->crb1.q; + q_tgt_len = sizeof(tmp_type50_msg->crb1.q); + dp_tgt = tmp_type50_msg->crb1.dp; + dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp); + dq_tgt = tmp_type50_msg->crb1.dq; + dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq); + u_tgt = tmp_type50_msg->crb1.u; + u_tgt_len = sizeof(tmp_type50_msg->crb1.u); + inp_tgt = tmp_type50_msg->crb1.message; + inp_tgt_len = sizeof(tmp_type50_msg->crb1.message); + } else { + tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN; + tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT; + p_tgt = tmp_type50_msg->crb2.p; + p_tgt_len = sizeof(tmp_type50_msg->crb2.p); + q_tgt = tmp_type50_msg->crb2.q; + q_tgt_len = sizeof(tmp_type50_msg->crb2.q); + dp_tgt = tmp_type50_msg->crb2.dp; + dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp); + dq_tgt = tmp_type50_msg->crb2.dq; + dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq); + u_tgt = tmp_type50_msg->crb2.u; + u_tgt_len = sizeof(tmp_type50_msg->crb2.u); + inp_tgt = tmp_type50_msg->crb2.message; + inp_tgt_len = sizeof(tmp_type50_msg->crb2.message); + } + + p_tgt += (p_tgt_len - long_len); + if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(p_tgt, long_len)) + return SEN_USER_ERROR; + q_tgt += (q_tgt_len - short_len); + if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len)) + return SEN_RELEASED; + if (is_empty(q_tgt, short_len)) + return SEN_USER_ERROR; + dp_tgt += (dp_tgt_len - long_len); + if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(dp_tgt, long_len)) + return SEN_USER_ERROR; + dq_tgt += (dq_tgt_len - short_len); + if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len)) + return SEN_RELEASED; + if (is_empty(dq_tgt, short_len)) + return SEN_USER_ERROR; + u_tgt += (u_tgt_len - long_len); + if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len)) + return SEN_RELEASED; + if (is_empty(u_tgt, long_len)) + return SEN_USER_ERROR; + inp_tgt += (inp_tgt_len - mod_len); + if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len)) + return SEN_RELEASED; + if (is_empty(inp_tgt, mod_len)) + return SEN_USER_ERROR; + + *z90cMsg_l_p = tmp_size - CALLER_HEADER; + + return 0; +} + int convert_request(unsigned char *buffer, int func, unsigned short function, int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) @@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function, cdx, msg_l_p, (struct type6_msg *) msg_p, dev_type); } + if (dev_type == CEX2A) { + if (func == ICARSACRT) + return ICACRT_msg_to_type50CRT_msg( + (struct ica_rsa_modexpo_crt *) buffer, + msg_l_p, (union type50_msg *) msg_p); + else + return ICAMEX_msg_to_type50MEX_msg( + (struct ica_rsa_modexpo *) buffer, + msg_l_p, (union type50_msg *) msg_p); + } return 0; } @@ -2081,8 +2359,8 @@ unset_ext_bitlens(void) { if (!ext_bitlens_msg_count) { PRINTK("Unable to use coprocessors for extended bitlengths. " - "Using PCICAs (if present) for extended bitlengths. 
" - "This is not an error.\n"); + "Using PCICAs/CEX2As (if present) for extended " + "bitlengths. This is not an error.\n"); ext_bitlens_msg_count++; } ext_bitlens = 0; @@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer, { struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; struct error_hdr *errh_p = (struct error_hdr *) response; + struct type80_hdr *t80h_p = (struct type80_hdr *) response; struct type84_hdr *t84h_p = (struct type84_hdr *) response; struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; int reply_code, service_rc, service_rs, src_l; @@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer, src_l = 0; switch (errh_p->type) { case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: reply_code = errh_p->reply_code; src_p = (unsigned char *)errh_p; PRINTK("Hardware error: Type %02X Message Header: " @@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer, src_p[0], src_p[1], src_p[2], src_p[3], src_p[4], src_p[5], src_p[6], src_p[7]); break; + case TYPE80_RSP_CODE: + src_l = icaMsg_p->outputdatalength; + src_p = response + (int)t80h_p->len - src_l; + break; case TYPE84_RSP_CODE: src_l = icaMsg_p->outputdatalength; src_p = response + (int)t84h_p->len - src_l; @@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer, if (reply_code) switch (reply_code) { case REP82_ERROR_OPERAND_INVALID: + case REP88_ERROR_MESSAGE_MALFORMD: return REC_OPERAND_INV; case REP82_ERROR_OPERAND_SIZE: return REC_OPERAND_SIZE; diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c index 790fcbb..135ae04 100644 --- a/drivers/s390/crypto/z90main.c +++ b/drivers/s390/crypto/z90main.c @@ -228,7 +228,7 @@ struct device_x { */ struct device { int dev_type; // PCICA, PCICC, PCIXCC_MCL2, - // PCIXCC_MCL3, CEX2C + // PCIXCC_MCL3, CEX2C, CEX2A enum devstat dev_stat; // current device status int dev_self_x; // Index in array int disabled; // Set when device is in error @@ -295,26 +295,30 @@ struct caller { /** * Function prototypes from z90hardware.c */ -enum hdstat query_online(int, int, int, int *, int *); -enum devstat reset_device(int, int, int); -enum devstat send_to_AP(int, int, int, unsigned char *); -enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *); -int convert_request(unsigned char *, int, short, int, int, int *, - unsigned char *); -int convert_response(unsigned char *, unsigned char *, int *, unsigned char *); +enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth, + int *dev_type); +enum devstat reset_device(int deviceNr, int cdx, int resetNr); +enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext); +enum devstat receive_from_AP(int dev_nr, int cdx, int resplen, + unsigned char *resp, unsigned char *psmid); +int convert_request(unsigned char *buffer, int func, unsigned short function, + int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p); +int convert_response(unsigned char *response, unsigned char *buffer, + int *respbufflen_p, unsigned char *resp_buff); /** * Low level function prototypes */ -static int create_z90crypt(int *); -static int refresh_z90crypt(int *); -static int find_crypto_devices(struct status *); -static int create_crypto_device(int); -static int destroy_crypto_device(int); +static int create_z90crypt(int *cdx_p); +static int refresh_z90crypt(int *cdx_p); +static int find_crypto_devices(struct status *deviceMask); +static int 
create_crypto_device(int index); +static int destroy_crypto_device(int index); static void destroy_z90crypt(void); -static int refresh_index_array(struct status *, struct device_x *); -static int probe_device_type(struct device *); -static int probe_PCIXCC_type(struct device *); +static int refresh_index_array(struct status *status_str, + struct device_x *index_array); +static int probe_device_type(struct device *devPtr); +static int probe_PCIXCC_type(struct device *devPtr); /** * proc fs definitions @@ -425,7 +429,7 @@ static struct miscdevice z90crypt_misc_device = { MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman " "and Jochen Roehrig"); MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " - "Copyright 2001, 2004 IBM Corporation"); + "Copyright 2001, 2005 IBM Corporation"); MODULE_LICENSE("GPL"); module_param(domain, int, 0); MODULE_PARM_DESC(domain, "domain index for device"); @@ -860,6 +864,12 @@ get_status_CEX2Ccount(void) } static inline int +get_status_CEX2Acount(void) +{ + return z90crypt.hdware_info->type_mask[CEX2A].st_count; +} + +static inline int get_status_requestq_count(void) { return requestq_count; } @@ -1008,11 +1018,13 @@ static inline int select_device_type(int *dev_type_p, int bytelength) { static int count = 0; - int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use; + int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail, + index_to_use; struct status *stat; if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && - (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV)) + (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) && + (*dev_type_p != ANYDEV)) return -1; if (*dev_type_p != ANYDEV) { stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; @@ -1022,7 +1034,13 @@ select_device_type(int *dev_type_p, int bytelength) return -1; } - /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */ + /** + * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in + * speed. + * + * PCICA and CEX2A do NOT co-exist, so only one or the other will be + * present. + */ stat = &z90crypt.hdware_info->type_mask[PCICA]; PCICA_avail = stat->st_count - (stat->disabled_count + stat->user_disabled_count); @@ -1032,29 +1050,38 @@ select_device_type(int *dev_type_p, int bytelength) stat = &z90crypt.hdware_info->type_mask[CEX2C]; CEX2C_avail = stat->st_count - (stat->disabled_count + stat->user_disabled_count); - if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { + stat = &z90crypt.hdware_info->type_mask[CEX2A]; + CEX2A_avail = stat->st_count - + (stat->disabled_count + stat->user_disabled_count); + if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) { /** - * bitlength is a factor, PCICA is the most capable, even with - * the new MCL for PCIXCC. + * bitlength is a factor, PCICA and CEX2A are the most capable, + * even with the new MCL for PCIXCC. 
*/ if ((bytelength < PCIXCC_MIN_MOD_SIZE) || (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { - if (!PCICA_avail) - return -1; - else { + if (PCICA_avail) { *dev_type_p = PCICA; return 0; } + if (CEX2A_avail) { + *dev_type_p = CEX2A; + return 0; + } + return -1; } index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + - CEX2C_avail); + CEX2C_avail + CEX2A_avail); if (index_to_use < PCICA_avail) *dev_type_p = PCICA; else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) *dev_type_p = PCIXCC_MCL3; - else + else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail + + CEX2C_avail)) *dev_type_p = CEX2C; + else + *dev_type_p = CEX2A; count++; return 0; } @@ -1359,7 +1386,7 @@ build_caller(struct work_element *we_p, short function) if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C)) + (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A)) return SEN_NOT_AVAIL; memcpy(caller_p->caller_id, we_p->caller_id, @@ -1428,7 +1455,8 @@ get_crypto_request_buffer(struct work_element *we_p) if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) { + (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) && + (we_p->devtype != ANYDEV)) { PRINTK("invalid device type\n"); return SEN_USER_ERROR; } @@ -1503,8 +1531,9 @@ get_crypto_request_buffer(struct work_element *we_p) function = PCI_FUNC_KEY_ENCRYPT; switch (we_p->devtype) { - /* PCICA does everything with a simple RSA mod-expo operation */ + /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */ case PCICA: + case CEX2A: function = PCI_FUNC_KEY_ENCRYPT; break; /** @@ -1662,7 +1691,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid, * trigger a fallback to software. 
*/ case -EINVAL: - if (we_p->devtype != PCICA) + if ((we_p->devtype != PCICA) && + (we_p->devtype != CEX2A)) rv = -EGETBUFF; break; case -ETIMEOUT: @@ -1779,6 +1809,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ret = -EFAULT; break; + case Z90STAT_CEX2ACOUNT: + tempstat = get_status_CEX2Acount(); + if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) + ret = -EFAULT; + break; + case Z90STAT_REQUESTQ_COUNT: tempstat = get_status_requestq_count(); if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) @@ -2019,6 +2055,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset, get_status_PCIXCCMCL3count()); len += sprintf(resp_buff+len, "CEX2C count: %d\n", get_status_CEX2Ccount()); + len += sprintf(resp_buff+len, "CEX2A count: %d\n", + get_status_CEX2Acount()); len += sprintf(resp_buff+len, "requestq count: %d\n", get_status_requestq_count()); len += sprintf(resp_buff+len, "pendingq count: %d\n", @@ -2026,8 +2064,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset, len += sprintf(resp_buff+len, "Total open handles: %d\n\n", get_status_totalopen_count()); len += sprinthx( - "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), " - "4: PCIXCC (MCL3), 5: CEX2C", + "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " + "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", resp_buff+len, get_status_status_mask(workarea), Z90CRYPT_NUM_APS); @@ -2140,6 +2178,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer, case '3': // PCIXCC_MCL2 case '4': // PCIXCC_MCL3 case '5': // CEX2C + case '6': // CEX2A j++; break; case 'd': @@ -3007,7 +3046,9 @@ create_crypto_device(int index) z90crypt.hdware_info->device_type_array[index] = 4; else if (deviceType == CEX2C) z90crypt.hdware_info->device_type_array[index] = 5; - else + else if (deviceType == CEX2A) + z90crypt.hdware_info->device_type_array[index] = 6; + else // No idea how this would happen. 
z90crypt.hdware_info->device_type_array[index] = -1; } diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index a7efc39..5488547 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -1,5 +1,5 @@ menu "S/390 network device drivers" - depends on NETDEVICES && ARCH_S390 + depends on NETDEVICES && S390 config LCS tristate "Lan Channel Station Interface" diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 6b63d21..e70af7f 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -1603,7 +1603,7 @@ dumpit(char* buf, int len) __u32 ct, sw, rm, dup; char *ptr, *rptr; char tbuf[82], tdup[82]; -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) char addr[22]; #else char addr[12]; @@ -1619,7 +1619,7 @@ dumpit(char* buf, int len) dup = 0; for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { if (sw == 0) { -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) sprintf(addr, "%16.16lX",(unsigned long)rptr); #else sprintf(addr, "%8.8X",(__u32)rptr); @@ -1634,7 +1634,7 @@ dumpit(char* buf, int len) if (sw == 8) { strcat(bhex, " "); } -#if (CONFIG_ARCH_S390X) +#if (CONFIG_64BIT) sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); #else sprintf(tbuf,"%2.2X", (__u32)*ptr); diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 0075894..77dacb4 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c @@ -1,5 +1,5 @@ /* - * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $ + * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $ * * CTC / LCS ccw_device driver * @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/err.h> +#include <asm/s390_rdev.h> #include <asm/ccwdev.h> #include <asm/ccwgroup.h> diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index df7647c..ea81773 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c @@ -1,5 +1,5 @@ /* - * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $ + * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $ * * IUCV network driver * @@ -29,7 +29,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* - * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $ + * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $ * */ @@ -54,7 +54,7 @@ #include <asm/s390_ext.h> #include <asm/ebcdic.h> #include <asm/smp.h> -#include <asm/ccwdev.h> //for root device stuff +#include <asm/s390_rdev.h> /* FLAGS: * All flags are defined in the field IPFLAGS1 of each function @@ -355,7 +355,7 @@ do { \ static void iucv_banner(void) { - char vbuf[] = "$Revision: 1.45 $"; + char vbuf[] = "$Revision: 1.47 $"; char *version = vbuf; if ((version = strchr(version, ':'))) { @@ -477,7 +477,7 @@ grab_param(void) ptr++; if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) ptr = iucv_param_pool; - } while (atomic_compare_and_swap(0, 1, &ptr->in_use)); + } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0); hint = ptr - iucv_param_pool; memset(&ptr->param, 0, sizeof(ptr->param)); diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index f8f55cc..97f927c 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -65,6 +65,7 @@ #include <asm/timex.h> #include <asm/semaphore.h> #include <asm/uaccess.h> +#include <asm/s390_rdev.h> #include "qeth.h" #include "qeth_mpc.h" @@ -1396,7 +1397,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel, channel->ccw.cda = (__u32) __pa(iob->data); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(setup, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, @@ -1463,7 +1464,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel, memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(setup, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, @@ -1616,7 +1617,7 @@ qeth_issue_next_read(struct qeth_card *card) } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); + atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0); QETH_DBF_TEXT(trace, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); @@ -1882,7 +1883,7 @@ qeth_send_control_data(struct qeth_card *card, int len, spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); if (IS_IPA(iob->data)) timer.expires = jiffies + QETH_IPA_TIMEOUT; @@ -1924,7 +1925,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len, QETH_DBF_TEXT(trace, 5, "osndctrd"); wait_event(card->wait_q, - atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); QETH_DBF_TEXT(trace, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); @@ -4236,9 +4237,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_DBF_TEXT(trace, 6, "dosndpfa"); /* spin until we get the queue ... 
*/ - while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED, - &queue->state)); + while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); /* ... now we've got the queue */ index = queue->next_buf_to_fill; buffer = &queue->bufs[queue->next_buf_to_fill]; @@ -4292,9 +4292,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_DBF_TEXT(trace, 6, "dosndpkt"); /* spin until we get the queue ... */ - while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED, - &queue->state)); + while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; buffer = &queue->bufs[queue->next_buf_to_fill]; /* diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c new file mode 100644 index 0000000..566cc3d --- /dev/null +++ b/drivers/s390/s390_rdev.c @@ -0,0 +1,53 @@ +/* + * drivers/s390/s390_rdev.c + * s390 root device + * $Revision: 1.2 $ + * + * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH, + * IBM Corporation + * Author(s): Cornelia Huck (cohuck@de.ibm.com) + * Carsten Otte (cotte@de.ibm.com) + */ + +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/device.h> +#include <asm/s390_rdev.h> + +static void +s390_root_dev_release(struct device *dev) +{ + kfree(dev); +} + +struct device * +s390_root_dev_register(const char *name) +{ + struct device *dev; + int ret; + + if (!strlen(name)) + return ERR_PTR(-EINVAL); + dev = kmalloc(sizeof(struct device), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + memset(dev, 0, sizeof(struct device)); + strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); + dev->release = s390_root_dev_release; + ret = device_register(dev); + if (ret) { + kfree(dev); + return ERR_PTR(ret); + } + return dev; +} + +void +s390_root_dev_unregister(struct device *dev) +{ + if (dev) + device_unregister(dev); +} + +EXPORT_SYMBOL(s390_root_dev_register); +EXPORT_SYMBOL(s390_root_dev_unregister); diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 4191fd9..3bf4666 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -23,7 +23,7 @@ static struct semaphore m_sem; -extern int css_process_crw(int); +extern int css_process_crw(int, int); extern int chsc_process_crw(void); extern int chp_process_crw(int, int); extern void css_reiterate_subchannels(void); @@ -49,9 +49,10 @@ s390_handle_damage(char *msg) static int s390_collect_crw_info(void *param) { - struct crw crw; + struct crw crw[2]; int ccode, ret, slow; struct semaphore *sem; + unsigned int chain; sem = (struct semaphore *)param; /* Set a nice name. */ @@ -59,25 +60,50 @@ s390_collect_crw_info(void *param) repeat: down_interruptible(sem); slow = 0; + chain = 0; while (1) { - ccode = stcrw(&crw); + if (unlikely(chain > 1)) { + struct crw tmp_crw; + + printk(KERN_WARNING"%s: Code does not support more " + "than two chained crws; please report to " + "linux390@de.ibm.com!\n", __FUNCTION__); + ccode = stcrw(&tmp_crw); + printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + __FUNCTION__, tmp_crw.slct, tmp_crw.oflw, + tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, + tmp_crw.erc, tmp_crw.rsid); + printk(KERN_WARNING"%s: This was crw number %x in the " + "chain\n", __FUNCTION__, chain); + if (ccode != 0) + break; + chain = tmp_crw.chn ? 
chain + 1 : 0; + continue; + } + ccode = stcrw(&crw[chain]); if (ccode != 0) break; DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", - crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc, - crw.erc, crw.rsid); + crw[chain].slct, crw[chain].oflw, crw[chain].chn, + crw[chain].rsc, crw[chain].anc, crw[chain].erc, + crw[chain].rsid); /* Check for overflows. */ - if (crw.oflw) { + if (crw[chain].oflw) { pr_debug("%s: crw overflow detected!\n", __FUNCTION__); css_reiterate_subchannels(); + chain = 0; slow = 1; continue; } - switch (crw.rsc) { + switch (crw[chain].rsc) { case CRW_RSC_SCH: - pr_debug("source is subchannel %04X\n", crw.rsid); - ret = css_process_crw (crw.rsid); + if (crw[0].chn && !chain) + break; + pr_debug("source is subchannel %04X\n", crw[0].rsid); + ret = css_process_crw (crw[0].rsid, + chain ? crw[1].rsid : 0); if (ret == -EAGAIN) slow = 1; break; @@ -85,18 +111,18 @@ repeat: pr_debug("source is monitoring facility\n"); break; case CRW_RSC_CPATH: - pr_debug("source is channel path %02X\n", crw.rsid); - switch (crw.erc) { + pr_debug("source is channel path %02X\n", crw[0].rsid); + switch (crw[0].erc) { case CRW_ERC_IPARM: /* Path has come. */ - ret = chp_process_crw(crw.rsid, 1); + ret = chp_process_crw(crw[0].rsid, 1); break; case CRW_ERC_PERRI: /* Path has gone. */ case CRW_ERC_PERRN: - ret = chp_process_crw(crw.rsid, 0); + ret = chp_process_crw(crw[0].rsid, 0); break; default: pr_debug("Don't know how to handle erc=%x\n", - crw.erc); + crw[0].erc); ret = 0; } if (ret == -EAGAIN) @@ -115,6 +141,8 @@ repeat: pr_debug("unknown source\n"); break; } + /* chain is always 0 or 1 here. */ + chain = crw[chain].chn ? chain + 1 : 0; } if (slow) queue_work(slow_path_wq, &slow_path_work); @@ -218,7 +246,7 @@ s390_revalidate_registers(struct mci *mci) */ kill_task = 1; -#ifndef __s390x__ +#ifndef CONFIG_64BIT asm volatile("ld 0,0(%0)\n" "ld 2,8(%0)\n" "ld 4,16(%0)\n" @@ -227,7 +255,7 @@ s390_revalidate_registers(struct mci *mci) #endif if (MACHINE_HAS_IEEE) { -#ifdef __s390x__ +#ifdef CONFIG_64BIT fpt_save_area = &S390_lowcore.floating_pt_save_area; fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; #else @@ -286,7 +314,7 @@ s390_revalidate_registers(struct mci *mci) */ s390_handle_damage("invalid control registers."); else -#ifdef __s390x__ +#ifdef CONFIG_64BIT asm volatile("lctlg 0,15,0(%0)" : : "a" (&S390_lowcore.cregs_save_area)); #else @@ -299,7 +327,7 @@ s390_revalidate_registers(struct mci *mci) * can't write something sensible into that register. */ -#ifdef __s390x__ +#ifdef CONFIG_64BIT /* * See if we can revalidate the TOD programmable register with its * old contents (should be zero) otherwise set it to zero. 
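The atomic_compare_and_swap() conversions in the qeth and iucv hunks above all follow one pattern: the old s390-private helper atomic_compare_and_swap(old, new, v) returned 0 when the swap succeeded, while the generic atomic_cmpxchg(v, old, new) returns the value the counter held before the operation, so each call site now has to compare the return value against the expected old value itself. A minimal sketch of the resulting try-lock idiom, written against the qeth queue-state constants (the helper name is hypothetical, for illustration only):

static inline int qeth_queue_try_lock(struct qeth_qdio_out_q *queue)
{
	/*
	 * The swap took place iff atomic_cmpxchg() returned the 'old'
	 * value we passed in; any other return value means another CPU
	 * owns the queue and the caller has to retry (or spin, as the
	 * qeth send paths do).
	 */
	return atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) == QETH_OUT_Q_UNLOCKED;
}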
@@ -356,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs) if (mci->b) { /* Processing backup -> verify if we can survive this */ u64 z_mcic, o_mcic, t_mcic; -#ifdef __s390x__ +#ifdef CONFIG_64BIT z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 87c2db1..66da840 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo, { int cc, retv; -#ifndef CONFIG_ARCH_S390X +#ifndef CONFIG_64BIT __asm__ __volatile__ ( "lr\t0,%2\n" "\tlr\t1,%3\n" "\tstsi\t0(%4)\n" diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4c42065..3c606cf 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -914,7 +914,7 @@ config SCSI_INIA100 config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" - depends on SCSI && PARPORT + depends on SCSI && PARPORT_PC ---help--- This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -941,7 +941,7 @@ config SCSI_PPA config SCSI_IMM tristate "IOMEGA parallel port (imm - newer drives)" - depends on SCSI && PARPORT + depends on SCSI && PARPORT_PC ---help--- This driver supports newer versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -968,7 +968,7 @@ config SCSI_IMM config SCSI_IZIP_EPP16 bool "ppa/imm option - Use slow (but safe) EPP-16" - depends on PARPORT && (SCSI_PPA || SCSI_IMM) + depends on SCSI_PPA || SCSI_IMM ---help--- EPP (Enhanced Parallel Port) is a standard for parallel ports which allows them to act as expansion buses that can handle up to 64 @@ -983,7 +983,7 @@ config SCSI_IZIP_EPP16 config SCSI_IZIP_SLOW_CTR bool "ppa/imm option - Assume slow parport control register" - depends on PARPORT && (SCSI_PPA || SCSI_IMM) + depends on SCSI_PPA || SCSI_IMM help Some parallel ports are known to have excessive delays between changing the parallel port control register and good data being @@ -1815,7 +1815,7 @@ config SCSI_SUNESP config ZFCP tristate "FCP host bus adapter driver for IBM eServer zSeries" - depends on ARCH_S390 && QDIO && SCSI + depends on S390 && QDIO && SCSI select SCSI_FC_ATTRS help If you want to access SCSI devices attached to your IBM eServer diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 4b647ee..557788e 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -166,6 +166,8 @@ static struct pci_driver piix_pci_driver = { .id_table = piix_pci_tbl, .probe = piix_init_one, .remove = ata_pci_remove_one, + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, }; static struct scsi_host_template piix_sht = { @@ -185,6 +187,8 @@ static struct scsi_host_template piix_sht = { .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, .bios_param = ata_std_bios_param, + .resume = ata_scsi_device_resume, + .suspend = ata_scsi_device_suspend, }; static const struct ata_port_operations piix_pata_ops = { diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index bdfb0a8..f55b9b3 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -4173,6 +4173,96 @@ err_out: * Inherited from caller. 
*/ +/* + * Execute a 'simple' command that consists only of the opcode 'cmd' itself, + * without filling any other registers + */ +static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, + u8 cmd) +{ + struct ata_taskfile tf; + int err; + + ata_tf_init(ap, &tf, dev->devno); + + tf.command = cmd; + tf.flags |= ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + + err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); + if (err) + printk(KERN_ERR "%s: ata command failed: %d\n", + __FUNCTION__, err); + + return err; +} + +static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) +{ + u8 cmd; + + if (!ata_try_flush_cache(dev)) + return 0; + + if (ata_id_has_flush_ext(dev->id)) + cmd = ATA_CMD_FLUSH_EXT; + else + cmd = ATA_CMD_FLUSH; + + return ata_do_simple_cmd(ap, dev, cmd); +} + +static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) +{ + return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); +} + +static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) +{ + return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); +} + +/** + * ata_device_resume - wake up a previously suspended device + * + * Kick the drive back into action by sending it an idle immediate + * command and making sure its transfer mode matches between drive + * and host. + * + */ +int ata_device_resume(struct ata_port *ap, struct ata_device *dev) +{ + if (ap->flags & ATA_FLAG_SUSPENDED) { + ap->flags &= ~ATA_FLAG_SUSPENDED; + ata_set_mode(ap); + } + if (!ata_dev_present(dev)) + return 0; + if (dev->class == ATA_DEV_ATA) + ata_start_drive(ap, dev); + + return 0; +} + +/** + * ata_device_suspend - prepare a device for suspend + * + * Flush the cache on the drive, if appropriate, then issue a + * standbynow command. + * + */ +int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) +{ + if (!ata_dev_present(dev)) + return 0; + if (dev->class == ATA_DEV_ATA) + ata_flush_cache(ap, dev); + + ata_standby_drive(ap, dev); + ap->flags |= ATA_FLAG_SUSPENDED; + return 0; +} + int ata_port_start (struct ata_port *ap) { struct device *dev = ap->host_set->dev; @@ -4921,6 +5011,23 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) return (tmp == bits->val) ? 
1 : 0; } + +int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +int ata_pci_device_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_device(pdev); + pci_set_master(pdev); + return 0; +} #endif /* CONFIG_PCI */ @@ -5024,4 +5131,11 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop); EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); EXPORT_SYMBOL_GPL(ata_pci_init_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); +EXPORT_SYMBOL_GPL(ata_pci_device_suspend); +EXPORT_SYMBOL_GPL(ata_pci_device_resume); #endif /* CONFIG_PCI */ + +EXPORT_SYMBOL_GPL(ata_device_suspend); +EXPORT_SYMBOL_GPL(ata_device_resume); +EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); +EXPORT_SYMBOL_GPL(ata_scsi_device_resume); diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 2c644cb..cfbceb5 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -396,6 +396,22 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf) } } +int ata_scsi_device_resume(struct scsi_device *sdev) +{ + struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; + struct ata_device *dev = &ap->device[sdev->id]; + + return ata_device_resume(ap, dev); +} + +int ata_scsi_device_suspend(struct scsi_device *sdev) +{ + struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; + struct ata_device *dev = &ap->device[sdev->id]; + + return ata_device_suspend(ap, dev); +} + /** * ata_to_sense_error - convert ATA error to SCSI error * @id: ATA device number diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 15842b1..ea7f3a4 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -263,9 +263,40 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; } +static int scsi_bus_suspend(struct device * dev, pm_message_t state) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_host_template *sht = sdev->host->hostt; + int err; + + err = scsi_device_quiesce(sdev); + if (err) + return err; + + if (sht->suspend) + err = sht->suspend(sdev); + + return err; +} + +static int scsi_bus_resume(struct device * dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_host_template *sht = sdev->host->hostt; + int err = 0; + + if (sht->resume) + err = sht->resume(sdev); + + scsi_device_resume(sdev); + return err; +} + struct bus_type scsi_bus_type = { .name = "scsi", .match = scsi_bus_match, + .suspend = scsi_bus_suspend, + .resume = scsi_bus_resume, }; int scsi_sysfs_register(void) diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index b8727d9..1288d62 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -37,11 +37,11 @@ * by the bootloader or in the platform init code. * * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2, - * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so - * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for - * the console code : without this 1:1 mapping, at early boot time, when we are - * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be - * mapped to. + * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and + * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! 
This is needed mainly + for the console code: without this 1:1 mapping, at early boot time, when we + are parsing the kernel args console=ttyPSC?, we wouldn't know which PSC it + will be mapped to. */ #include <linux/config.h> @@ -65,6 +65,10 @@ #include <linux/serial_core.h> +/* We've been assigned a range on the "Low-density serial ports" major */ +#define SERIAL_PSC_MAJOR 204 +#define SERIAL_PSC_MINOR 148 + #define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ @@ -668,15 +672,15 @@ mpc52xx_console_setup(struct console *co, char *options) } -extern struct uart_driver mpc52xx_uart_driver; +static struct uart_driver mpc52xx_uart_driver; static struct console mpc52xx_console = { - .name = "ttyS", + .name = "ttyPSC", .write = mpc52xx_console_write, .device = uart_console_device, .setup = mpc52xx_console_setup, .flags = CON_PRINTBUFFER, - .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */ + .index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0 ) */ .data = &mpc52xx_uart_driver, }; @@ -703,10 +707,10 @@ console_initcall(mpc52xx_console_init); static struct uart_driver mpc52xx_uart_driver = { .owner = THIS_MODULE, .driver_name = "mpc52xx_psc_uart", - .dev_name = "ttyS", - .devfs_name = "ttyS", - .major = TTY_MAJOR, - .minor = 64, + .dev_name = "ttyPSC", + .devfs_name = "ttyPSC", + .major = SERIAL_PSC_MAJOR, + .minor = SERIAL_PSC_MINOR, .nr = MPC52xx_PSC_MAXNUM, .cons = MPC52xx_PSC_CONSOLE, }; @@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma, lru_cache_add_active(page); set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( page, vma->vm_page_prot)))); - page_add_anon_rmap(page, vma, address); + page_add_new_anon_rmap(page, vma, address); pte_unmap_unlock(pte, ptl); /* no need for flush_tlb */ diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8f873e6..e08ab47 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -148,6 +148,26 @@ void fuse_release_background(struct fuse_req *req) spin_unlock(&fuse_lock); } +static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) +{ + int i; + struct fuse_init_out *arg = &req->misc.init_out; + + if (arg->major != FUSE_KERNEL_VERSION) + fc->conn_error = 1; + else { + fc->minor = arg->minor; + fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; + } + + /* After INIT reply is received other requests can go + out. So do (FUSE_MAX_OUTSTANDING - 1) number of + up()s on outstanding_sem. The last up() is done in + fuse_putback_request() */ + for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) + up(&fc->outstanding_sem); +} + /* * This function is called when a request is finished. Either a reply * has arrived or it was interrupted (and not yet sent) or some error @@ -172,19 +192,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) up_read(&fc->sbput_sem); } wake_up(&req->waitq); - if (req->in.h.opcode == FUSE_INIT) { - int i; - - if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION) - fc->conn_error = 1; - - /* After INIT reply is received other requests can go - out. So do (FUSE_MAX_OUTSTANDING - 1) number of - up()s on outstanding_sem. 
The last up() is done in - fuse_putback_request() */ - for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) - up(&fc->outstanding_sem); - } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) { + if (req->in.h.opcode == FUSE_INIT) + process_init_reply(fc, req); + else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) { /* Special case for failed iget in CREATE */ u64 nodeid = req->in.h.nodeid; __fuse_get_request(req); @@ -357,7 +367,7 @@ void fuse_send_init(struct fuse_conn *fc) /* This is called from fuse_read_super() so there's guaranteed to be a request available */ struct fuse_req *req = do_get_request(fc); - struct fuse_init_in_out *arg = &req->misc.init_in_out; + struct fuse_init_in *arg = &req->misc.init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; req->in.h.opcode = FUSE_INIT; @@ -365,8 +375,12 @@ void fuse_send_init(struct fuse_conn *fc) req->in.args[0].size = sizeof(*arg); req->in.args[0].value = arg; req->out.numargs = 1; - req->out.args[0].size = sizeof(*arg); - req->out.args[0].value = arg; + /* Variable length argument used for backward compatibility + with interface version < 7.5. Rest of init_out is zeroed + by do_get_request(), so a short reply is not a problem */ + req->out.argvar = 1; + req->out.args[0].size = sizeof(struct fuse_init_out); + req->out.args[0].value = &req->misc.init_out; request_send_background(fc, req); } @@ -615,6 +629,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, struct fuse_copy_state cs; unsigned reqsize; + restart: spin_lock(&fuse_lock); fc = file->private_data; err = -EPERM; @@ -630,20 +645,25 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, req = list_entry(fc->pending.next, struct fuse_req, list); list_del_init(&req->list); - spin_unlock(&fuse_lock); in = &req->in; - reqsize = req->in.h.len; - fuse_copy_init(&cs, 1, req, iov, nr_segs); - err = -EINVAL; - if (iov_length(iov, nr_segs) >= reqsize) { - err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); - if (!err) - err = fuse_copy_args(&cs, in->numargs, in->argpages, - (struct fuse_arg *) in->args, 0); + reqsize = in->h.len; + /* If request is too large, reply with an error and restart the read */ + if (iov_length(iov, nr_segs) < reqsize) { + req->out.h.error = -EIO; + /* SETXATTR is special, since it may contain too large data */ + if (in->h.opcode == FUSE_SETXATTR) + req->out.h.error = -E2BIG; + request_end(fc, req); + goto restart; } + spin_unlock(&fuse_lock); + fuse_copy_init(&cs, 1, req, iov, nr_segs); + err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); + if (!err) + err = fuse_copy_args(&cs, in->numargs, in->argpages, + (struct fuse_arg *) in->args, 0); fuse_copy_finish(&cs); - spin_lock(&fuse_lock); req->locked = 0; if (!err && req->interrupted) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 51f5da6..417bcee 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -13,8 +13,16 @@ #include <linux/gfp.h> #include <linux/sched.h> #include <linux/namei.h> -#include <linux/mount.h> +/* + * FUSE caches dentries and attributes with separate timeouts. The + * time in jiffies until the dentry/attributes are valid is stored in + * dentry->d_time and fuse_inode->i_time respectively. 
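+ * + * For instance, a lookup reply carrying entry_valid = 1 and entry_valid_nsec = 0 yields entry->d_time = time_to_jiffies(1, 0), i.e. roughly jiffies + HZ, so the dentry is trusted for about one second before fuse_dentry_revalidate() has to ask userspace again.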
+ */ + +/* + * Calculate the time in jiffies until a dentry/attributes are valid + */ static inline unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) { @@ -22,6 +30,50 @@ static inline unsigned long time_to_jiffies(unsigned long sec, return jiffies + timespec_to_jiffies(&ts); } +/* + * Set dentry and possibly attribute timeouts from the lookup/mk* + * replies + */ +static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) +{ + entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec); + if (entry->d_inode) + get_fuse_inode(entry->d_inode)->i_time = + time_to_jiffies(o->attr_valid, o->attr_valid_nsec); +} + +/* + * Mark the attributes as stale, so that at the next call to + * ->getattr() they will be fetched from userspace + */ +void fuse_invalidate_attr(struct inode *inode) +{ + get_fuse_inode(inode)->i_time = jiffies - 1; +} + +/* + * Just mark the entry as stale, so that a next attempt to look it up + * will result in a new lookup call to userspace + * + * This is called when a dentry is about to become negative and the + * timeout is unknown (unlink, rmdir, rename and in some cases + * lookup) + */ +static void fuse_invalidate_entry_cache(struct dentry *entry) +{ + entry->d_time = jiffies - 1; +} + +/* + * Same as fuse_invalidate_entry_cache(), but also try to remove the + * dentry from the hash + */ +static void fuse_invalidate_entry(struct dentry *entry) +{ + d_invalidate(entry); + fuse_invalidate_entry_cache(entry); +} + static void fuse_lookup_init(struct fuse_req *req, struct inode *dir, struct dentry *entry, struct fuse_entry_out *outarg) @@ -37,17 +89,34 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir, req->out.args[0].value = outarg; } +/* + * Check whether the dentry is still valid + * + * If the entry validity timeout has expired and the dentry is + * positive, try to redo the lookup. If the lookup results in a + * different inode, then let the VFS invalidate the dentry and redo + * the lookup once more. If the lookup results in the same inode, + * then refresh the attributes, timeouts and mark the dentry valid. 
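+ * + * Either way the return convention is the usual ->d_revalidate() one: returning 1 keeps the dentry, returning 0 makes the VFS throw it away and repeat the lookup. An expired negative dentry is always thrown away, since only a fresh lookup can tell whether the name has appeared in the meantime.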
+ */ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) { - if (!entry->d_inode || is_bad_inode(entry->d_inode)) + struct inode *inode = entry->d_inode; + + if (inode && is_bad_inode(inode)) return 0; else if (time_after(jiffies, entry->d_time)) { int err; struct fuse_entry_out outarg; - struct inode *inode = entry->d_inode; - struct fuse_inode *fi = get_fuse_inode(inode); - struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_req *req = fuse_get_request(fc); + struct fuse_conn *fc; + struct fuse_req *req; + + /* Doesn't hurt to "reset" the validity timeout */ + fuse_invalidate_entry_cache(entry); + if (!inode) + return 0; + + fc = get_fuse_conn(inode); + req = fuse_get_request(fc); if (!req) return 0; @@ -55,6 +124,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) request_send(fc, req); err = req->out.h.error; if (!err) { + struct fuse_inode *fi = get_fuse_inode(inode); if (outarg.nodeid != get_node_id(inode)) { fuse_send_forget(fc, req, outarg.nodeid, 1); return 0; @@ -66,18 +136,18 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) return 0; fuse_change_attributes(inode, &outarg.attr); - entry->d_time = time_to_jiffies(outarg.entry_valid, - outarg.entry_valid_nsec); - fi->i_time = time_to_jiffies(outarg.attr_valid, - outarg.attr_valid_nsec); + fuse_change_timeout(entry, &outarg); } return 1; } +/* + * Check if there's already a hashed alias of this directory inode. + * If yes, then lookup and mkdir must not create a new alias. + */ static int dir_alias(struct inode *inode) { if (S_ISDIR(inode->i_mode)) { - /* Don't allow creating an alias to a directory */ struct dentry *alias = d_find_alias(inode); if (alias) { dput(alias); @@ -96,8 +166,14 @@ static struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, }; -static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, - struct inode **inodep) +static inline int valid_mode(int m) +{ + return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || + S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); +} + +static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, + struct nameidata *nd) { int err; struct fuse_entry_out outarg; @@ -106,53 +182,49 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, struct fuse_req *req; if (entry->d_name.len > FUSE_NAME_MAX) - return -ENAMETOOLONG; + return ERR_PTR(-ENAMETOOLONG); req = fuse_get_request(fc); if (!req) - return -EINTR; + return ERR_PTR(-EINTR); fuse_lookup_init(req, dir, entry, &outarg); request_send(fc, req); err = req->out.h.error; - if (!err && invalid_nodeid(outarg.nodeid)) + if (!err && ((outarg.nodeid && invalid_nodeid(outarg.nodeid)) || + !valid_mode(outarg.attr.mode))) err = -EIO; - if (!err) { + if (!err && outarg.nodeid) { inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); if (!inode) { fuse_send_forget(fc, req, outarg.nodeid, 1); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } } fuse_put_request(fc, req); if (err && err != -ENOENT) - return err; + return ERR_PTR(err); - if (inode) { - struct fuse_inode *fi = get_fuse_inode(inode); - entry->d_time = time_to_jiffies(outarg.entry_valid, - outarg.entry_valid_nsec); - fi->i_time = time_to_jiffies(outarg.attr_valid, - outarg.attr_valid_nsec); + if (inode && dir_alias(inode)) { + iput(inode); + return ERR_PTR(-EIO); } - + d_add(entry, inode); entry->d_op = &fuse_dentry_operations; - *inodep = inode; - return 0; -} - -void 
fuse_invalidate_attr(struct inode *inode) -{ - get_fuse_inode(inode)->i_time = jiffies - 1; -} - -static void fuse_invalidate_entry(struct dentry *entry) -{ - d_invalidate(entry); - entry->d_time = jiffies - 1; + if (!err) + fuse_change_timeout(entry, &outarg); + else + fuse_invalidate_entry_cache(entry); + return NULL; } +/* + * Atomic create+open operation + * + * If the filesystem doesn't support this, then fall back to separate + * 'mknod' + 'open' requests. + */ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, struct nameidata *nd) { @@ -163,7 +235,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, struct fuse_open_in inarg; struct fuse_open_out outopen; struct fuse_entry_out outentry; - struct fuse_inode *fi; struct fuse_file *ff; struct file *file; int flags = nd->intent.open.flags - 1; @@ -172,10 +243,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, if (fc->no_create) goto out; - err = -ENAMETOOLONG; - if (entry->d_name.len > FUSE_NAME_MAX) - goto out; - err = -EINTR; req = fuse_get_request(fc); if (!req) @@ -220,17 +287,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, if (!inode) { flags &= ~(O_CREAT | O_EXCL | O_TRUNC); ff->fh = outopen.fh; + /* Special release, with inode = NULL, this will + trigger a 'forget' request when the release is + complete */ fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0); goto out_put_request; } fuse_put_request(fc, req); - entry->d_time = time_to_jiffies(outentry.entry_valid, - outentry.entry_valid_nsec); - fi = get_fuse_inode(inode); - fi->i_time = time_to_jiffies(outentry.attr_valid, - outentry.attr_valid_nsec); - d_instantiate(entry, inode); + fuse_change_timeout(entry, &outentry); file = lookup_instantiate_filp(nd, entry, generic_file_open); if (IS_ERR(file)) { ff->fh = outopen.fh; @@ -248,13 +313,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, return err; } +/* + * Code shared between mknod, mkdir, symlink and link + */ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, struct inode *dir, struct dentry *entry, int mode) { struct fuse_entry_out outarg; struct inode *inode; - struct fuse_inode *fi; int err; req->in.h.nodeid = get_node_id(dir); @@ -268,10 +335,13 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, fuse_put_request(fc, req); return err; } - if (invalid_nodeid(outarg.nodeid)) { - fuse_put_request(fc, req); - return -EIO; - } + err = -EIO; + if (invalid_nodeid(outarg.nodeid)) + goto out_put_request; + + if ((outarg.attr.mode ^ mode) & S_IFMT) + goto out_put_request; + inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); if (!inode) { @@ -280,22 +350,19 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, } fuse_put_request(fc, req); - /* Don't allow userspace to do really stupid things... 
*/ - if (((inode->i_mode ^ mode) & S_IFMT) || dir_alias(inode)) { + if (dir_alias(inode)) { iput(inode); return -EIO; } - entry->d_time = time_to_jiffies(outarg.entry_valid, - outarg.entry_valid_nsec); - - fi = get_fuse_inode(inode); - fi->i_time = time_to_jiffies(outarg.attr_valid, - outarg.attr_valid_nsec); - d_instantiate(entry, inode); + fuse_change_timeout(entry, &outarg); fuse_invalidate_attr(dir); return 0; + + out_put_request: + fuse_put_request(fc, req); + return err; } static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, @@ -355,12 +422,7 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry, { struct fuse_conn *fc = get_fuse_conn(dir); unsigned len = strlen(link) + 1; - struct fuse_req *req; - - if (len > FUSE_SYMLINK_MAX) - return -ENAMETOOLONG; - - req = fuse_get_request(fc); + struct fuse_req *req = fuse_get_request(fc); if (!req) return -EINTR; @@ -399,6 +461,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) inode->i_nlink = 0; fuse_invalidate_attr(inode); fuse_invalidate_attr(dir); + fuse_invalidate_entry_cache(entry); } else if (err == -EINTR) fuse_invalidate_entry(entry); return err; @@ -424,6 +487,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) if (!err) { entry->d_inode->i_nlink = 0; fuse_invalidate_attr(dir); + fuse_invalidate_entry_cache(entry); } else if (err == -EINTR) fuse_invalidate_entry(entry); return err; @@ -459,6 +523,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent, fuse_invalidate_attr(olddir); if (olddir != newdir) fuse_invalidate_attr(newdir); + + /* newent will end up negative */ + if (newent->d_inode) + fuse_invalidate_entry_cache(newent); } else if (err == -EINTR) { /* If request was interrupted, DEITY only knows if the rename actually took place. If the invalidation @@ -566,6 +634,15 @@ static int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task) return 0; } +/* + * Check whether the inode attributes are still valid + * + * If the attribute validity timeout has expired, then fetch the fresh + * attributes with a 'getattr' request + * + * I'm not sure why cached attributes are never returned for the root + * inode; this is probably being too cautious. + */ static int fuse_revalidate(struct dentry *entry) { struct inode *inode = entry->d_inode; @@ -613,6 +690,19 @@ static int fuse_access(struct inode *inode, int mask) return err; } +/* + * Check permission. The two basic access models of FUSE are: + * + * 1) Local access checking ('default_permissions' mount option) based + * on file mode. This is the plain old disk filesystem permission + * model. + * + * 2) "Remote" access checking, where the server is responsible for + * checking permission in each inode operation. An exception to this + * is if ->permission() was invoked from sys_access() in which case an + * access request is sent. Execute permission is still checked + * locally based on file mode. + */ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) { struct fuse_conn *fc = get_fuse_conn(inode); @@ -631,14 +721,10 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) err = generic_permission(inode, mask, NULL); } - /* FIXME: Need some mechanism to revoke permissions: - currently if the filesystem suddenly changes the - file mode, we will not be informed about it, and - continue to allow access to the file/directory. 
- - This is actually not so grave, since the user can - simply keep access to the file/directory anyway by - keeping it open... */ + /* Note: the opposite of the above test does not + exist. So if permissions are revoked this won't be + noticed immediately, only after the attribute + timeout has expired */ return err; } else { @@ -691,7 +777,12 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) struct page *page; struct inode *inode = file->f_dentry->d_inode; struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_req *req = fuse_get_request(fc); + struct fuse_req *req; + + if (is_bad_inode(inode)) + return -EIO; + + req = fuse_get_request(fc); if (!req) return -EINTR; @@ -806,6 +897,15 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg) } } +/* + * Set attributes, and at the same time refresh them. + * + * Truncation is slightly complicated, because the 'truncate' request + * may fail, in which case we don't want to touch the mapping. + * vmtruncate() doesn't allow for this case. So do the rlimit + * checking by hand and call vmtruncate() only after the file has + * actually been truncated. + */ static int fuse_setattr(struct dentry *entry, struct iattr *attr) { struct inode *inode = entry->d_inode; @@ -883,23 +983,6 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, return err; } -static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, - struct nameidata *nd) -{ - struct inode *inode; - int err; - - err = fuse_lookup_iget(dir, entry, &inode); - if (err) - return ERR_PTR(err); - if (inode && dir_alias(inode)) { - iput(inode); - return ERR_PTR(-EIO); - } - d_add(entry, inode); - return NULL; -} - static int fuse_setxattr(struct dentry *entry, const char *name, const void *value, size_t size, int flags) { @@ -909,9 +992,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name, struct fuse_setxattr_in inarg; int err; - if (size > FUSE_XATTR_SIZE_MAX) - return -E2BIG; - if (fc->no_setxattr) return -EOPNOTSUPP; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2ca8614..05deddd 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -163,6 +163,9 @@ static int fuse_flush(struct file *file) struct fuse_flush_in inarg; int err; + if (is_bad_inode(inode)) + return -EIO; + if (fc->no_flush) return 0; @@ -199,6 +202,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, struct fuse_fsync_in inarg; int err; + if (is_bad_inode(inode)) + return -EIO; + if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) return 0; @@ -272,16 +278,22 @@ static int fuse_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); - loff_t pos = (loff_t) page->index << PAGE_CACHE_SHIFT; - struct fuse_req *req = fuse_get_request(fc); - int err = -EINTR; + struct fuse_req *req; + int err; + + err = -EIO; + if (is_bad_inode(inode)) + goto out; + + err = -EINTR; + req = fuse_get_request(fc); if (!req) goto out; req->out.page_zeroing = 1; req->num_pages = 1; req->pages[0] = page; - fuse_send_read(req, file, inode, pos, PAGE_CACHE_SIZE); + fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE); err = req->out.h.error; fuse_put_request(fc, req); if (!err) @@ -295,7 +307,7 @@ static int fuse_readpage(struct file *file, struct page *page) static int fuse_send_readpages(struct fuse_req *req, struct file *file, struct inode *inode) { - loff_t pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT; + loff_t 
pos = page_offset(req->pages[0]); size_t count = req->num_pages << PAGE_CACHE_SHIFT; unsigned i; req->out.page_zeroing = 1; @@ -345,6 +357,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_readpages_data data; int err; + + if (is_bad_inode(inode)) + return -EIO; + data.file = file; data.inode = inode; data.req = fuse_get_request(fc); @@ -402,8 +418,13 @@ static int fuse_commit_write(struct file *file, struct page *page, unsigned count = to - offset; struct inode *inode = page->mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); - loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; - struct fuse_req *req = fuse_get_request(fc); + loff_t pos = page_offset(page) + offset; + struct fuse_req *req; + + if (is_bad_inode(inode)) + return -EIO; + + req = fuse_get_request(fc); if (!req) return -EINTR; @@ -454,7 +475,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf, nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; - npages = min(npages, FUSE_MAX_PAGES_PER_REQ); + npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ); down_read(&current->mm->mmap_sem); npages = get_user_pages(current, current->mm, user_addr, npages, write, 0, req->pages, NULL); @@ -475,12 +496,16 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf, size_t nmax = write ? fc->max_write : fc->max_read; loff_t pos = *ppos; ssize_t res = 0; - struct fuse_req *req = fuse_get_request(fc); + struct fuse_req *req; + + if (is_bad_inode(inode)) + return -EIO; + + req = fuse_get_request(fc); if (!req) return -EINTR; while (count) { - size_t tmp; size_t nres; size_t nbytes = min(count, nmax); int err = fuse_get_user_pages(req, buf, nbytes, !write); @@ -488,8 +513,8 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf, res = err; break; } - tmp = (req->num_pages << PAGE_SHIFT) - req->page_offset; - nbytes = min(nbytes, tmp); + nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset; + nbytes = min(count, nbytes); if (write) nres = fuse_send_write(req, file, inode, pos, nbytes); else diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0ea5301..74c8d09 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,9 @@ /** If more requests are outstanding, then the operation will block */ #define FUSE_MAX_OUTSTANDING 10 +/** It could be as large as PATH_MAX, but would that have any uses? */ +#define FUSE_NAME_MAX 1024 + /** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem module will check permissions based on the file mode. Otherwise no permission checking is done in the kernel */ @@ -108,9 +111,6 @@ struct fuse_out { struct fuse_arg args[3]; }; -struct fuse_req; -struct fuse_conn; - /** * A request to the client */ @@ -159,7 +159,8 @@ struct fuse_req { union { struct fuse_forget_in forget_in; struct fuse_release_in release_in; - struct fuse_init_in_out init_in_out; + struct fuse_init_in init_in; + struct fuse_init_out init_out; } misc; /** page vector */ @@ -272,6 +273,9 @@ struct fuse_conn { /** Is create not implemented by fs?
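The conversions above replace the open-coded "(loff_t) page->index << PAGE_CACHE_SHIFT" with page_offset(). The helper, as defined in linux/pagemap.h of this generation (shown here as a sketch), packages the same shift once, so call sites can no longer forget the (loff_t) cast that keeps the shift from overflowing on 32-bit machines:

static inline loff_t page_offset(struct page *page)
{
	/* byte offset of a page cache page within its file */
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}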
*/ unsigned no_create : 1; + /** Negotiated minor version */ + unsigned minor; + /** Backing dev info */ struct backing_dev_info bdi; }; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index e69a546..04c80cc 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -135,12 +135,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) fuse_init_common(inode); init_special_inode(inode, inode->i_mode, new_decode_dev(attr->rdev)); - } else { - /* Don't let user create weird files */ - inode->i_mode = S_IFREG; - fuse_init_common(inode); - fuse_init_file_inode(inode); - } + } else + BUG(); } static int fuse_inode_eq(struct inode *inode, void *_nodeidp) @@ -218,6 +214,7 @@ static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; + stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; @@ -238,10 +235,12 @@ static int fuse_statfs(struct super_block *sb, struct kstatfs *buf) if (!req) return -EINTR; + memset(&outarg, 0, sizeof(outarg)); req->in.numargs = 0; req->in.h.opcode = FUSE_STATFS; req->out.numargs = 1; - req->out.args[0].size = sizeof(outarg); + req->out.args[0].size = + fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); req->out.args[0].value = &outarg; request_send(fc, req); err = req->out.h.error; @@ -482,7 +481,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fc->max_read = d.max_read; if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages) fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE; - fc->max_write = FUSE_MAX_IN / 2; err = -ENOMEM; root = get_root_inode(sb, d.rootmode); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 8093351..6daaf7c 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -320,7 +320,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) /* temporarily use utf8 to correctly find the hidden dir below */ nls = sbi->nls; sbi->nls = load_nls("utf8"); - if (!nls) { + if (!sbi->nls) { printk("HFS+: unable to load nls for utf8\n"); err = -EINVAL; goto cleanup; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 8c1cef3..8c41315 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -100,9 +100,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) loff_t len, vma_len; int ret; - if ((vma->vm_flags & (VM_MAYSHARE | VM_WRITE)) == VM_WRITE) - return -EINVAL; - if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1)) return -EINVAL; diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 014a51f..cb3cef5 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -24,29 +24,75 @@ #include <linux/slab.h> /* - * Unlink a buffer from a transaction. + * Unlink a buffer from a transaction checkpoint list. * * Called with j_list_lock held. 
*/ -static void __buffer_unlink_first(struct journal_head *jh) { transaction_t *transaction; transaction = jh->b_cp_transaction; - jh->b_cp_transaction = NULL; jh->b_cpnext->b_cpprev = jh->b_cpprev; jh->b_cpprev->b_cpnext = jh->b_cpnext; - if (transaction->t_checkpoint_list == jh) + if (transaction->t_checkpoint_list == jh) { transaction->t_checkpoint_list = jh->b_cpnext; - if (transaction->t_checkpoint_list == jh) - transaction->t_checkpoint_list = NULL; + if (transaction->t_checkpoint_list == jh) + transaction->t_checkpoint_list = NULL; + } +} + +/* + * Unlink a buffer from a transaction checkpoint(io) list. + * + * Called with j_list_lock held. + */ + +static inline void __buffer_unlink(struct journal_head *jh) +{ + transaction_t *transaction; + + transaction = jh->b_cp_transaction; + + __buffer_unlink_first(jh); + if (transaction->t_checkpoint_io_list == jh) { + transaction->t_checkpoint_io_list = jh->b_cpnext; + if (transaction->t_checkpoint_io_list == jh) + transaction->t_checkpoint_io_list = NULL; + } +} + +/* + * Move a buffer from the checkpoint list to the checkpoint io list + * + * Called with j_list_lock held + */ + +static inline void __buffer_relink_io(struct journal_head *jh) +{ + transaction_t *transaction; + + transaction = jh->b_cp_transaction; + __buffer_unlink_first(jh); + + if (!transaction->t_checkpoint_io_list) { + jh->b_cpnext = jh->b_cpprev = jh; + } else { + jh->b_cpnext = transaction->t_checkpoint_io_list; + jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev; + jh->b_cpprev->b_cpnext = jh; + jh->b_cpnext->b_cpprev = jh; + } + transaction->t_checkpoint_io_list = jh; } /* * Try to release a checkpointed buffer from its transaction. - * Returns 1 if we released it. + * Returns 1 if we released it and 2 if we also released the + * whole transaction. + * * Requires j_list_lock * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it */ @@ -57,12 +103,11 @@ static int __try_to_free_cp_buf(struct journal_head *jh) if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { JBUFFER_TRACE(jh, "remove from checkpoint list"); - __journal_remove_checkpoint(jh); + ret = __journal_remove_checkpoint(jh) + 1; jbd_unlock_bh_state(bh); journal_remove_journal_head(bh); BUFFER_TRACE(bh, "release"); __brelse(bh); - ret = 1; } else { jbd_unlock_bh_state(bh); } @@ -117,83 +162,53 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh) } /* - * Clean up a transaction's checkpoint list. - * - * We wait for any pending IO to complete and make sure any clean - * buffers are removed from the transaction. - * - * Return 1 if we performed any actions which might have destroyed the - * checkpoint. (journal_remove_checkpoint() deletes the transaction when - * the last checkpoint buffer is cleansed) + * Clean up a transaction's list of buffers submitted for IO. + * We wait for any pending IO to complete and remove any clean + * buffers. Note that we take the buffers in the opposite ordering + * from the one in which they were submitted for IO. * * Called with j_list_lock held. */
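__buffer_relink_io() above is the usual tail insert into a circular doubly-linked list, written out by hand because journal_heads carry raw b_cpnext/b_cpprev pointers rather than a struct list_head. The same operation on a generic hand-rolled node, as a self-contained sketch:

struct node {
	struct node *next, *prev;
};

/* insert n at the tail of the circular list rooted at *head */
static void tail_insert(struct node **head, struct node *n)
{
	if (!*head) {
		n->next = n->prev = n;	/* sole element points at itself */
	} else {
		n->next = *head;
		n->prev = (*head)->prev;
		n->prev->next = n;
		n->next->prev = n;
	}
	/* like t_checkpoint_io_list, head tracks the newest entry,
	 * which is why the io list is drained in reverse submit order */
	*head = n;
}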
-static int __cleanup_transaction(journal_t *journal, transaction_t *transaction) + +static void __wait_cp_io(journal_t *journal, transaction_t *transaction) { - struct journal_head *jh, *next_jh, *last_jh; + struct journal_head *jh; struct buffer_head *bh; - int ret = 0; - - assert_spin_locked(&journal->j_list_lock); - jh = transaction->t_checkpoint_list; - if (!jh) - return 0; - - last_jh = jh->b_cpprev; - next_jh = jh; - do { - jh = next_jh; + tid_t this_tid; + int released = 0; + + this_tid = transaction->t_tid; +restart: + /* Did somebody clean up the transaction in the meanwhile? */ + if (journal->j_checkpoint_transactions != transaction || + transaction->t_tid != this_tid) + return; + while (!released && transaction->t_checkpoint_io_list) { + jh = transaction->t_checkpoint_io_list; bh = jh2bh(jh); + if (!jbd_trylock_bh_state(bh)) { + jbd_sync_bh(journal, bh); + spin_lock(&journal->j_list_lock); + goto restart; + } if (buffer_locked(bh)) { atomic_inc(&bh->b_count); spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); - goto out_return_1; - } - - /* - * This is foul - */ - if (!jbd_trylock_bh_state(bh)) { - jbd_sync_bh(journal, bh); - goto out_return_1; + spin_lock(&journal->j_list_lock); + goto restart; } - - if (jh->b_transaction != NULL) { - transaction_t *t = jh->b_transaction; - tid_t tid = t->t_tid; - - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - log_start_commit(journal, tid); - log_wait_commit(journal, tid); - goto out_return_1; - } - /* - * AKPM: I think the buffer_jbddirty test is redundant - it - * shouldn't have NULL b_transaction? + * Now in whatever state the buffer currently is, we know that + * it has been written out and so we can drop it from the list */ - next_jh = jh->b_cpnext; - if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) { - BUFFER_TRACE(bh, "remove from checkpoint"); - __journal_remove_checkpoint(jh); - jbd_unlock_bh_state(bh); - journal_remove_journal_head(bh); - __brelse(bh); - ret = 1; - } else { - jbd_unlock_bh_state(bh); - } - } while (jh != last_jh); - - return ret; -out_return_1: - spin_lock(&journal->j_list_lock); - return 1; + released = __journal_remove_checkpoint(jh); + jbd_unlock_bh_state(bh); + } } #define NR_BATCH 64 @@ -203,9 +218,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) { int i; - spin_unlock(&journal->j_list_lock); ll_rw_block(SWRITE, *batch_count, bhs); - spin_lock(&journal->j_list_lock); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = bhs[i]; clear_buffer_jwrite(bh); @@ -221,19 +234,46 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) * Return 1 if something happened which requires us to abort the current * scan of the checkpoint list. * - * Called with j_list_lock held.
+ * Called with j_list_lock held and drops it if 1 is returned * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it */ -static int __flush_buffer(journal_t *journal, struct journal_head *jh, - struct buffer_head **bhs, int *batch_count, - int *drop_count) +static int __process_buffer(journal_t *journal, struct journal_head *jh, + struct buffer_head **bhs, int *batch_count) { struct buffer_head *bh = jh2bh(jh); int ret = 0; - if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) { - J_ASSERT_JH(jh, jh->b_transaction == NULL); + if (buffer_locked(bh)) { + get_bh(bh); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + wait_on_buffer(bh); + /* the journal_head may have gone by now */ + BUFFER_TRACE(bh, "brelse"); + put_bh(bh); + ret = 1; + } + else if (jh->b_transaction != NULL) { + transaction_t *t = jh->b_transaction; + tid_t tid = t->t_tid; + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + log_start_commit(journal, tid); + log_wait_commit(journal, tid); + ret = 1; + } + else if (!buffer_dirty(bh)) { + J_ASSERT_JH(jh, !buffer_jbddirty(bh)); + BUFFER_TRACE(bh, "remove from checkpoint"); + __journal_remove_checkpoint(jh); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + journal_remove_journal_head(bh); + put_bh(bh); + ret = 1; + } + else { /* * Important: we are about to write the buffer, and * possibly block, while still holding the journal lock. @@ -246,45 +286,30 @@ static int __flush_buffer(journal_t *journal, struct journal_head *jh, J_ASSERT_BH(bh, !buffer_jwrite(bh)); set_buffer_jwrite(bh); bhs[*batch_count] = bh; + __buffer_relink_io(jh); jbd_unlock_bh_state(bh); (*batch_count)++; if (*batch_count == NR_BATCH) { + spin_unlock(&journal->j_list_lock); __flush_batch(journal, bhs, batch_count); ret = 1; } - } else { - int last_buffer = 0; - if (jh->b_cpnext == jh) { - /* We may be about to drop the transaction. Tell the - * caller that the lists have changed. - */ - last_buffer = 1; - } - if (__try_to_free_cp_buf(jh)) { - (*drop_count)++; - ret = last_buffer; - } } return ret; } /* - * Perform an actual checkpoint. We don't write out only enough to - * satisfy the current blocked requests: rather we submit a reasonably - * sized chunk of the outstanding data to disk at once for - * efficiency. __log_wait_for_space() will retry if we didn't free enough. + * Perform an actual checkpoint. We take the first transaction on the + * list of transactions to be checkpointed and send all its buffers + * to disk. We submit larger chunks of data at once. * - * However, we _do_ take into account the amount requested so that once - * the IO has been queued, we can return as soon as enough of it has - * completed to disk. - * * The journal should be locked before calling this function. */ int log_do_checkpoint(journal_t *journal) { + transaction_t *transaction; + tid_t this_tid; int result; - int batch_count = 0; - struct buffer_head *bhs[NR_BATCH]; jbd_debug(1, "Start checkpoint\n"); @@ -299,79 +324,70 @@ int log_do_checkpoint(journal_t *journal) return result; /* - * OK, we need to start writing disk blocks. Try to free up a - * quarter of the log in a single checkpoint if we can. + * OK, we need to start writing disk blocks. Take one transaction + * and write it. */ + spin_lock(&journal->j_list_lock); + if (!journal->j_checkpoint_transactions) + goto out; + transaction = journal->j_checkpoint_transactions; + this_tid = transaction->t_tid; +restart: /* - * AKPM: check this code. 
I had a feeling a while back that it - * degenerates into a busy loop at unmount time. + * If someone cleaned up this transaction while we slept, we're + * done (maybe it's a new transaction, but it fell at the same + * address). */ - spin_lock(&journal->j_list_lock); - while (journal->j_checkpoint_transactions) { - transaction_t *transaction; - struct journal_head *jh, *last_jh, *next_jh; - int drop_count = 0; - int cleanup_ret, retry = 0; - tid_t this_tid; - - transaction = journal->j_checkpoint_transactions; - this_tid = transaction->t_tid; - jh = transaction->t_checkpoint_list; - last_jh = jh->b_cpprev; - next_jh = jh; - do { + if (journal->j_checkpoint_transactions == transaction && + transaction->t_tid == this_tid) { + int batch_count = 0; + struct buffer_head *bhs[NR_BATCH]; + struct journal_head *jh; + int retry = 0; + + while (!retry && transaction->t_checkpoint_list) { struct buffer_head *bh; - jh = next_jh; - next_jh = jh->b_cpnext; + jh = transaction->t_checkpoint_list; bh = jh2bh(jh); if (!jbd_trylock_bh_state(bh)) { jbd_sync_bh(journal, bh); - spin_lock(&journal->j_list_lock); retry = 1; break; } - retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count); - if (cond_resched_lock(&journal->j_list_lock)) { + retry = __process_buffer(journal, jh, bhs, + &batch_count); + if (!retry && + lock_need_resched(&journal->j_list_lock)) { + spin_unlock(&journal->j_list_lock); retry = 1; break; } - } while (jh != last_jh && !retry); + } if (batch_count) { + if (!retry) { + spin_unlock(&journal->j_list_lock); + retry = 1; + } __flush_batch(journal, bhs, &batch_count); - retry = 1; } + if (retry) { + spin_lock(&journal->j_list_lock); + goto restart; + } /* - * If someone cleaned up this transaction while we slept, we're - * done - */ - if (journal->j_checkpoint_transactions != transaction) - break; - if (retry) - continue; - /* - * Maybe it's a new transaction, but it fell at the same - * address - */ - if (transaction->t_tid != this_tid) - continue; - /* - * We have walked the whole transaction list without - * finding anything to write to disk. We had better be - * able to make some progress or we are in trouble. + * Now we have cleaned up the first transaction's checkpoint + * list. Let's clean up the second one. */ - cleanup_ret = __cleanup_transaction(journal, transaction); - J_ASSERT(drop_count != 0 || cleanup_ret != 0); - if (journal->j_checkpoint_transactions != transaction) - break; + __wait_cp_io(journal, transaction); } +out: spin_unlock(&journal->j_list_lock); result = cleanup_journal_tail(journal); if (result < 0) return result; - return 0; } @@ -456,52 +472,91 @@ int cleanup_journal_tail(journal_t *journal) /* Checkpoint list management */
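Both __wait_cp_io() and the restart check above revalidate with the pair (transaction pointer, tid) after any interval in which j_list_lock was dropped: the pointer alone is not enough, since the transaction can be freed while we sleep and a new one allocated at the very same address, a reuse that only the tid generation exposes. The idiom in isolation, as a sketch with illustrative names:

struct obj {
	unsigned long gen;	/* bumped whenever the slot is recycled */
};

/* after retaking the lock: it is the same object only if both the
 * address and the generation still match what we sampled earlier */
static int still_valid(struct obj *head, struct obj *seen,
		       unsigned long seen_gen)
{
	return head == seen && seen->gen == seen_gen;
}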
/* + * journal_clean_one_cp_list + * + * Find all the written-back checkpoint buffers in the given list and release them. + * + * Called with the journal locked. + * Called with j_list_lock held. + * Returns number of buffers reaped (for debug) + */ + +static int journal_clean_one_cp_list(struct journal_head *jh, int *released) +{ + struct journal_head *last_jh; + struct journal_head *next_jh = jh; + int ret, freed = 0; + + *released = 0; + if (!jh) + return 0; + + last_jh = jh->b_cpprev; + do { + jh = next_jh; + next_jh = jh->b_cpnext; + /* Use trylock because of the ranking */ + if (jbd_trylock_bh_state(jh2bh(jh))) { + ret = __try_to_free_cp_buf(jh); + if (ret) { + freed++; + if (ret == 2) { + *released = 1; + return freed; + } + } + } + /* + * This function only frees up some memory if possible so we + * don't have an obligation to finish processing. Bail out if + * preemption requested: + */ + if (need_resched()) + return freed; + } while (jh != last_jh); + + return freed; +} + +/* * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. * * Called with the journal locked. * Called with j_list_lock held. - * Returns number of bufers reaped (for debug) + * Returns number of buffers reaped (for debug) */ int __journal_clean_checkpoint_list(journal_t *journal) { transaction_t *transaction, *last_transaction, *next_transaction; - int ret = 0; + int ret = 0, released; transaction = journal->j_checkpoint_transactions; - if (transaction == 0) + if (!transaction) goto out; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { - struct journal_head *jh; - transaction = next_transaction; next_transaction = transaction->t_cpnext; - jh = transaction->t_checkpoint_list; - if (jh) { - struct journal_head *last_jh = jh->b_cpprev; - struct journal_head *next_jh = jh; - - do { - jh = next_jh; - next_jh = jh->b_cpnext; - /* Use trylock because of the ranknig */ - if (jbd_trylock_bh_state(jh2bh(jh))) - ret += __try_to_free_cp_buf(jh); - /* - * This function only frees up some memory - * if possible so we dont have an obligation - * to finish processing. Bail out if preemption - * requested: - */ - if (need_resched()) - goto out; - } while (jh != last_jh); - } + ret += journal_clean_one_cp_list(transaction-> + t_checkpoint_list, &released); + if (need_resched()) + goto out; + if (released) + continue; + /* + * It is essential to be as careful when removing a buffer + * from this list as in the t_checkpoint_list case, since + * not yet submitted buffers can appear on io_list + */ + ret += journal_clean_one_cp_list(transaction-> + t_checkpoint_io_list, &released); + if (need_resched()) + goto out; } while (transaction != last_transaction); out: return ret; @@ -516,18 +571,22 @@ out: * buffer updates committed in that transaction have safely been stored * elsewhere on disk. To achieve this, all of the buffers in a * transaction need to be maintained on the transaction's checkpoint - * list until they have been rewritten, at which point this function is + * lists until they have been rewritten, at which point this function is * called to remove the buffer from the existing transaction's - * checkpoint list. + * checkpoint lists. + * + * The function returns 1 if it frees the transaction, 0 otherwise. * * This function is called with the journal locked. * This function is called with j_list_lock held.
+ * This function is called with jbd_lock_bh_state(jh2bh(jh)) */ -void __journal_remove_checkpoint(struct journal_head *jh) +int __journal_remove_checkpoint(struct journal_head *jh) { transaction_t *transaction; journal_t *journal; + int ret = 0; JBUFFER_TRACE(jh, "entry"); @@ -538,8 +597,10 @@ void __journal_remove_checkpoint(struct journal_head *jh) journal = transaction->t_journal; __buffer_unlink(jh); + jh->b_cp_transaction = NULL; - if (transaction->t_checkpoint_list != NULL) + if (transaction->t_checkpoint_list != NULL || + transaction->t_checkpoint_io_list != NULL) goto out; JBUFFER_TRACE(jh, "transaction has no more buffers"); @@ -565,8 +626,10 @@ void __journal_remove_checkpoint(struct journal_head *jh) /* Just in case anybody was waiting for more transactions to be checkpointed... */ wake_up(&journal->j_wait_logspace); + ret = 1; out: JBUFFER_TRACE(jh, "exit"); + return ret; } /* @@ -628,6 +691,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction) J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_log_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); + J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(transaction->t_updates == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 041380f..6d2dfed 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -56,13 +56,20 @@ static int nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, struct nfsd3_attrstat *resp) { - int nfserr; + int err, nfserr; dprintk("nfsd: GETATTR(3) %s\n", - SVCFH_fmt(&argp->fh)); + SVCFH_fmt(&argp->fh)); fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP); + if (nfserr) + RETURN_STATUS(nfserr); + + err = vfs_getattr(resp->fh.fh_export->ex_mnt, + resp->fh.fh_dentry, &resp->stat); + nfserr = nfserrno(err); + RETURN_STATUS(nfserr); } diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 9147b85..243d94b 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -154,37 +154,34 @@ decode_sattr3(u32 *p, struct iattr *iap) } static inline u32 * -encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) +encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp, + struct kstat *stat) { - struct vfsmount *mnt = fhp->fh_export->ex_mnt; struct dentry *dentry = fhp->fh_dentry; - struct kstat stat; struct timespec time; - vfs_getattr(mnt, dentry, &stat); - - *p++ = htonl(nfs3_ftypes[(stat.mode & S_IFMT) >> 12]); - *p++ = htonl((u32) stat.mode); - *p++ = htonl((u32) stat.nlink); - *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid)); - *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid)); - if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) { + *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); + *p++ = htonl((u32) stat->mode); + *p++ = htonl((u32) stat->nlink); + *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); + *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); + if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { - p = xdr_encode_hyper(p, (u64) stat.size); + p = xdr_encode_hyper(p, (u64) stat->size); } - p = xdr_encode_hyper(p, ((u64)stat.blocks) << 9); - *p++ = htonl((u32) MAJOR(stat.rdev)); - *p++ = htonl((u32) MINOR(stat.rdev)); + p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9); + *p++ = htonl((u32) MAJOR(stat->rdev)); + *p++ = htonl((u32) MINOR(stat->rdev)); if (is_fsid(fhp, 
rqstp->rq_reffh)) p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); else - p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat.dev)); - p = xdr_encode_hyper(p, (u64) stat.ino); - p = encode_time3(p, &stat.atime); + p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat->dev)); + p = xdr_encode_hyper(p, (u64) stat->ino); + p = encode_time3(p, &stat->atime); lease_get_mtime(dentry->d_inode, &time); p = encode_time3(p, &time); - p = encode_time3(p, &stat.ctime); + p = encode_time3(p, &stat->ctime); return p; } @@ -232,8 +229,14 @@ encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && dentry->d_inode != NULL) { - *p++ = xdr_one; /* attributes follow */ - return encode_fattr3(rqstp, p, fhp); + int err; + struct kstat stat; + + err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat); + if (!err) { + *p++ = xdr_one; /* attributes follow */ + return encode_fattr3(rqstp, p, fhp, &stat); + } } *p++ = xdr_zero; return p; @@ -616,7 +619,7 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, u32 *p, struct nfsd3_attrstat *resp) { if (resp->status == 0) - p = encode_fattr3(rqstp, p, &resp->fh); + p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index b45999f..aa7bb41 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -152,46 +152,44 @@ decode_sattr(u32 *p, struct iattr *iap) } static inline u32 * -encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) +encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp, + struct kstat *stat) { - struct vfsmount *mnt = fhp->fh_export->ex_mnt; struct dentry *dentry = fhp->fh_dentry; - struct kstat stat; int type; struct timespec time; - vfs_getattr(mnt, dentry, &stat); - type = (stat.mode & S_IFMT); + type = (stat->mode & S_IFMT); *p++ = htonl(nfs_ftypes[type >> 12]); - *p++ = htonl((u32) stat.mode); - *p++ = htonl((u32) stat.nlink); - *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid)); - *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid)); + *p++ = htonl((u32) stat->mode); + *p++ = htonl((u32) stat->nlink); + *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); + *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); - if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) { + if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); } else { - *p++ = htonl((u32) stat.size); + *p++ = htonl((u32) stat->size); } - *p++ = htonl((u32) stat.blksize); + *p++ = htonl((u32) stat->blksize); if (S_ISCHR(type) || S_ISBLK(type)) - *p++ = htonl(new_encode_dev(stat.rdev)); + *p++ = htonl(new_encode_dev(stat->rdev)); else *p++ = htonl(0xffffffff); - *p++ = htonl((u32) stat.blocks); + *p++ = htonl((u32) stat->blocks); if (is_fsid(fhp, rqstp->rq_reffh)) *p++ = htonl((u32) fhp->fh_export->ex_fsid); else - *p++ = htonl(new_encode_dev(stat.dev)); - *p++ = htonl((u32) stat.ino); - *p++ = htonl((u32) stat.atime.tv_sec); - *p++ = htonl(stat.atime.tv_nsec ? stat.atime.tv_nsec / 1000 : 0); + *p++ = htonl(new_encode_dev(stat->dev)); + *p++ = htonl((u32) stat->ino); + *p++ = htonl((u32) stat->atime.tv_sec); + *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); lease_get_mtime(dentry->d_inode, &time); *p++ = htonl((u32) time.tv_sec); *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); - *p++ = htonl((u32) stat.ctime.tv_sec); - *p++ = htonl(stat.ctime.tv_nsec ? stat.ctime.tv_nsec / 1000 : 0); + *p++ = htonl((u32) stat->ctime.tv_sec); + *p++ = htonl(stat->ctime.tv_nsec ? 
stat->ctime.tv_nsec / 1000 : 0); return p; } @@ -199,7 +197,9 @@ encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) /* Helper function for NFSv2 ACL code */ u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) { - return encode_fattr(rqstp, p, fhp); + struct kstat stat; + vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat); + return encode_fattr(rqstp, p, fhp, &stat); } /* @@ -394,7 +394,7 @@ int nfssvc_encode_attrstat(struct svc_rqst *rqstp, u32 *p, struct nfsd_attrstat *resp) { - p = encode_fattr(rqstp, p, &resp->fh); + p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } @@ -403,7 +403,7 @@ nfssvc_encode_diropres(struct svc_rqst *rqstp, u32 *p, struct nfsd_diropres *resp) { p = encode_fh(p, &resp->fh); - p = encode_fattr(rqstp, p, &resp->fh); + p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } @@ -428,7 +428,7 @@ int nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p, struct nfsd_readres *resp) { - p = encode_fattr(rqstp, p, &resp->fh); + p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->count); xdr_ressize_check(rqstp, p);
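The xdr changes above (mirroring the nfs3xdr.c ones) turn encode_fattr() from a routine that fetched attributes itself into one that encodes a kstat supplied by the caller, so each NFS procedure can run vfs_getattr() once, propagate its error, and only then encode. The resulting calling convention, sketched with error handling reduced to the essentials:

	struct kstat stat;
	int err;

	err = vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat);
	if (!err)
		p = encode_fattr(rqstp, p, fhp, &stat);	/* attributes follow */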
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index af7c3c3..df4019f 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -717,27 +717,33 @@ nfsd_close(struct file *filp) * As this calls fsync (not fdatasync) there is no need for a write_inode * after it. */ -static inline void nfsd_dosync(struct file *filp, struct dentry *dp, - struct file_operations *fop) +static inline int nfsd_dosync(struct file *filp, struct dentry *dp, + struct file_operations *fop) { struct inode *inode = dp->d_inode; int (*fsync) (struct file *, struct dentry *, int); + int err = nfs_ok; filemap_fdatawrite(inode->i_mapping); if (fop && (fsync = fop->fsync)) - fsync(filp, dp, 0); + err = fsync(filp, dp, 0); filemap_fdatawait(inode->i_mapping); + + return nfserrno(err); } -static void +static int nfsd_sync(struct file *filp) { + int err; struct inode *inode = filp->f_dentry->d_inode; dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name); down(&inode->i_sem); - nfsd_dosync(filp, filp->f_dentry, filp->f_op); + err = nfsd_dosync(filp, filp->f_dentry, filp->f_op); up(&inode->i_sem); + + return err; } void @@ -874,6 +880,16 @@ out: return err; } +static void kill_suid(struct dentry *dentry) +{ + struct iattr ia; + ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID; + + down(&dentry->d_inode->i_sem); + notify_change(dentry, &ia); + up(&dentry->d_inode->i_sem); +} + static inline int nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, @@ -927,14 +943,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, } /* clear setuid/setgid flag after write */ - if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID))) { - struct iattr ia; - ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID; - - down(&inode->i_sem); - notify_change(dentry, &ia); - up(&inode->i_sem); - } + if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID))) + kill_suid(dentry); if (err >= 0 && stable) { static ino_t last_ino; @@ -962,7 +972,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, if (inode->i_state & I_DIRTY) { dprintk("nfsd: write sync %d\n", current->pid); - nfsd_sync(file); + err = nfsd_sync(file); } #if 0 wake_up(&inode->i_wait); @@ -1066,7 +1076,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, return err; if (EX_ISSYNC(fhp->fh_export)) { if (file->f_op && file->f_op->fsync) { - nfsd_sync(file); + err = nfsd_sync(file); } else { err = nfserr_notsupp; } diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index 656bc43..e227a04 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig @@ -85,7 +85,7 @@ config ATARI_PARTITION config IBM_PARTITION bool "IBM disk label and partition support" - depends on PARTITION_ADVANCED && ARCH_S390 + depends on PARTITION_ADVANCED && S390 help Say Y here if you would like to be able to read the hard disk partition table format used by IBM DASD disks operating under CMS. diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c index 6327bcb..78010ad 100644 --- a/fs/partitions/ibm.c +++ b/fs/partitions/ibm.c @@ -56,7 +56,10 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) struct hd_geometry *geo; char type[5] = {0,}; char name[7] = {0,}; - struct vtoc_volume_label *vlabel; + union label_t { + struct vtoc_volume_label vol; + struct vtoc_cms_label cms; + } *label; unsigned char *data; Sector sect; @@ -64,9 +67,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) goto out_noinfo; if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) goto out_nogeo; - if ((vlabel = kmalloc(sizeof(struct vtoc_volume_label), - GFP_KERNEL)) == NULL) - goto out_novlab; + if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL) + goto out_nolab; if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) @@ -87,7 +89,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) strncpy(name, data + 8, 6); else strncpy(name, data + 4, 6); - memcpy (vlabel, data, sizeof(struct vtoc_volume_label)); + memcpy(label, data, sizeof(union label_t)); put_dev_sector(sect); EBCASC(type, 4); @@ -100,14 +102,12 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) /* * VM style CMS1 labeled disk */ - int *label = (int *) vlabel; - - if (label[13] != 0) { + if (label->cms.disk_offset != 0) { printk("CMS1/%8s(MDSK):", name); /* disk is reserved minidisk */ - blocksize = label[3]; - offset = label[13]; - size = (label[7] - 1)*(blocksize >> 9); + blocksize = label->cms.block_size; + offset = label->cms.disk_offset; + size = (label->cms.block_count - 1) * (blocksize >> 9); } else { printk("CMS1/%8s:", name); offset = (info->label_block + 1); @@ -126,7 +126,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) printk("VOL1/%8s:", name); /* get block number and read then go through format1 labels */ - blk = cchhb2blk(&vlabel->vtoc, geo) + 1; + blk = cchhb2blk(&label->vol.vtoc, geo) + 1; counter = 0; while ((data = read_dev_sector(bdev, blk*(blocksize/512), &sect)) != NULL) { @@ -174,7 +174,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) } printk("\n"); - kfree(vlabel); + kfree(label); kfree(geo); kfree(info); return 1; @@ -182,8 +182,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) out_readerr: out_badsect: out_noioctl: - kfree(vlabel); -out_novlab: + kfree(label); +out_nolab: kfree(geo); out_nogeo: kfree(info); diff --git a/fs/proc/array.c b/fs/proc/array.c index 3e1239e..5e9251f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -308,7 +308,7 @@ int proc_pid_status(struct task_struct *task, char * buffer) buffer = task_sig(task, buffer); buffer = task_cap(task, buffer); buffer = cpuset_task_status_allowed(task, buffer); -#if defined(CONFIG_ARCH_S390) +#if defined(CONFIG_S390) buffer
= task_show_regs(task, buffer); #endif return buffer - orig; diff --git a/fs/ramfs/Makefile b/fs/ramfs/Makefile index f096f30..5a0236e 100644 --- a/fs/ramfs/Makefile +++ b/fs/ramfs/Makefile @@ -4,4 +4,6 @@ obj-$(CONFIG_RAMFS) += ramfs.o -ramfs-objs := inode.o +file-mmu-y := file-nommu.o +file-mmu-$(CONFIG_MMU) := file-mmu.o +ramfs-objs += inode.o $(file-mmu-y) diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c new file mode 100644 index 0000000..2115383 --- /dev/null +++ b/fs/ramfs/file-mmu.c @@ -0,0 +1,57 @@ +/* file-mmu.c: ramfs MMU-based file operations + * + * Resizable simple ram filesystem for Linux. + * + * Copyright (C) 2000 Linus Torvalds. + * 2000 Transmeta Corp. + * + * Usage limits added by David Gibson, Linuxcare Australia. + * This file is released under the GPL. + */ + +/* + * NOTE! This filesystem is probably most useful + * not as a real filesystem, but as an example of + * how virtual filesystems can be written. + * + * It doesn't get much simpler than this. Consider + * that this file implements the full semantics of + * a POSIX-compliant read-write filesystem. + * + * Note in particular how the filesystem does not + * need to implement any data structures of its own + * to keep track of the virtual data: using the VFS + * caches is sufficient. + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/smp_lock.h> +#include <linux/backing-dev.h> +#include <linux/ramfs.h> + +#include <asm/uaccess.h> +#include "internal.h" + +struct address_space_operations ramfs_aops = { + .readpage = simple_readpage, + .prepare_write = simple_prepare_write, + .commit_write = simple_commit_write +}; + +struct file_operations ramfs_file_operations = { + .read = generic_file_read, + .write = generic_file_write, + .mmap = generic_file_mmap, + .fsync = simple_sync_file, + .sendfile = generic_file_sendfile, + .llseek = generic_file_llseek, +}; + +struct inode_operations ramfs_file_inode_operations = { + .getattr = simple_getattr, +}; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c new file mode 100644 index 0000000..3f810ac --- /dev/null +++ b/fs/ramfs/file-nommu.c @@ -0,0 +1,292 @@ +/* file-nommu.c: no-MMU version of ramfs + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/smp_lock.h> +#include <linux/backing-dev.h> +#include <linux/ramfs.h> +#include <linux/quotaops.h> +#include <linux/pagevec.h> +#include <linux/mman.h> + +#include <asm/uaccess.h> +#include "internal.h" + +static int ramfs_nommu_setattr(struct dentry *, struct iattr *); + +struct address_space_operations ramfs_aops = { + .readpage = simple_readpage, + .prepare_write = simple_prepare_write, + .commit_write = simple_commit_write +}; + +struct file_operations ramfs_file_operations = { + .mmap = ramfs_nommu_mmap, + .get_unmapped_area = ramfs_nommu_get_unmapped_area, + .read = generic_file_read, + .write = generic_file_write, + .fsync = simple_sync_file, + .sendfile = generic_file_sendfile, + .llseek = generic_file_llseek, +}; + +struct inode_operations ramfs_file_inode_operations = { + .setattr = ramfs_nommu_setattr, + .getattr = simple_getattr, +}; + +/*****************************************************************************/ +/* + * add a contiguous set of pages into a ramfs inode when it's truncated from + * size 0 on the assumption that it's going to be used for an mmap of shared + * memory + */ +static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) +{ + struct pagevec lru_pvec; + unsigned long npages, xpages, loop, limit; + struct page *pages; + unsigned order; + void *data; + int ret; + + /* make various checks */ + order = get_order(newsize); + if (unlikely(order >= MAX_ORDER)) + goto too_big; + + limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + if (limit != RLIM_INFINITY && newsize > limit) + goto fsize_exceeded; + + if (newsize > inode->i_sb->s_maxbytes) + goto too_big; + + i_size_write(inode, newsize); + + /* allocate enough contiguous pages to be able to satisfy the + * request */ + pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); + if (!pages) + return -ENOMEM; + + /* split the high-order page into an array of single pages */ + xpages = 1UL << order; + npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; + + for (loop = 0; loop < npages; loop++) + set_page_count(pages + loop, 1); + + /* trim off any pages we don't actually require */ + for (loop = npages; loop < xpages; loop++) + __free_page(pages + loop); + + /* clear the memory we allocated */ + newsize = PAGE_SIZE * npages; + data = page_address(pages); + memset(data, 0, newsize); + + /* attach all the pages to the inode's address space */ + pagevec_init(&lru_pvec, 0); + for (loop = 0; loop < npages; loop++) { + struct page *page = pages + loop; + + ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL); + if (ret < 0) + goto add_error; + + if (!pagevec_add(&lru_pvec, page)) + __pagevec_lru_add(&lru_pvec); + + unlock_page(page); + } + + pagevec_lru_add(&lru_pvec); + return 0; + + fsize_exceeded: + send_sig(SIGXFSZ, current, 0); + too_big: + return -EFBIG; + + add_error: + page_cache_release(pages + loop); + for (loop++; loop < npages; loop++) + __free_page(pages + loop); + return ret; +} + +/*****************************************************************************/ +/* + * check that file shrinkage doesn't leave any VMAs dangling in midair + */ +static int ramfs_nommu_check_mappings(struct inode *inode, + size_t newsize, size_t size) +{ + struct vm_area_struct *vma; + struct prio_tree_iter iter; + + /* search for VMAs that fall within the dead zone */ + vma_prio_tree_foreach(vma, &iter, 
&inode->i_mapping->i_mmap, + newsize >> PAGE_SHIFT, + (size + PAGE_SIZE - 1) >> PAGE_SHIFT + ) { + /* found one - only interested if it's shared out of the page + * cache */ + if (vma->vm_flags & VM_SHARED) + return -ETXTBSY; /* not quite true, but near enough */ + } + + return 0; +} + +/*****************************************************************************/ +/* + * + */ +static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) +{ + int ret; + + /* assume a truncate from zero size is going to be for the purposes of + * shared mmap */ + if (size == 0) { + if (unlikely(newsize >> 32)) + return -EFBIG; + + return ramfs_nommu_expand_for_mapping(inode, newsize); + } + + /* check that a decrease in size doesn't cut off any shared mappings */ + if (newsize < size) { + ret = ramfs_nommu_check_mappings(inode, newsize, size); + if (ret < 0) + return ret; + } + + ret = vmtruncate(inode, newsize); + + return ret; +} + +/*****************************************************************************/ +/* + * handle a change of attributes + * - we're specifically interested in a change of size + */ +static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) +{ + struct inode *inode = dentry->d_inode; + unsigned int old_ia_valid = ia->ia_valid; + int ret = 0; + + /* by providing our own setattr() method, we skip this quotaism */ + if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) || + (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid)) + ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0; + + /* pick out size-changing events */ + if (ia->ia_valid & ATTR_SIZE) { + loff_t size = i_size_read(inode); + if (ia->ia_size != size) { + ret = ramfs_nommu_resize(inode, ia->ia_size, size); + if (ret < 0 || ia->ia_valid == ATTR_SIZE) + goto out; + } else { + /* we skipped the truncate but must still update + * timestamps + */ + ia->ia_valid |= ATTR_MTIME|ATTR_CTIME; + } + } + + ret = inode_setattr(inode, ia); + out: + ia->ia_valid = old_ia_valid; + return ret; +} + +/*****************************************************************************/ +/* + * try to determine where a shared mapping can be made + * - we require that: + * - the pages to be mapped must exist + * - the pages be physically contiguous in sequence + */ +unsigned long ramfs_nommu_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + unsigned long maxpages, lpages, nr, loop, ret; + struct inode *inode = file->f_dentry->d_inode; + struct page **pages = NULL, **ptr, *page; + loff_t isize; + + if (!(flags & MAP_SHARED)) + return addr; + + /* the mapping mustn't extend beyond the EOF */ + lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; + isize = i_size_read(inode); + + ret = -EINVAL; + maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (pgoff >= maxpages) + goto out; + + if (maxpages - pgoff < lpages) + goto out; + + /* gang-find the pages */ + ret = -ENOMEM; + pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto out; + + nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages); + if (nr != lpages) + goto out; /* leave if some pages were missing */ + + /* check the pages for physical adjacency */ + ptr = pages; + page = *ptr++; + page++; + for (loop = lpages; loop > 1; loop--) + if (*ptr++ != page++) + goto out; + + /* okay - all conditions fulfilled */ + ret = (unsigned long) page_address(pages[0]); + + out: + if (pages) { + ptr = pages; + for (loop = lpages; loop > 0; loop--) + put_page(*ptr++); +
kfree(pages); + } + + return ret; +} + +/*****************************************************************************/ +/* + * set up a mapping + */ +int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) +{ + return 0; +} diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 0a88917..c66bd5e 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -34,13 +34,12 @@ #include <linux/ramfs.h> #include <asm/uaccess.h> +#include "internal.h" /* some random number */ #define RAMFS_MAGIC 0x858458f6 static struct super_operations ramfs_ops; -static struct address_space_operations ramfs_aops; -static struct inode_operations ramfs_file_inode_operations; static struct inode_operations ramfs_dir_inode_operations; static struct backing_dev_info ramfs_backing_dev_info = { @@ -142,25 +141,6 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * return error; } -static struct address_space_operations ramfs_aops = { - .readpage = simple_readpage, - .prepare_write = simple_prepare_write, - .commit_write = simple_commit_write -}; - -struct file_operations ramfs_file_operations = { - .read = generic_file_read, - .write = generic_file_write, - .mmap = generic_file_mmap, - .fsync = simple_sync_file, - .sendfile = generic_file_sendfile, - .llseek = generic_file_llseek, -}; - -static struct inode_operations ramfs_file_inode_operations = { - .getattr = simple_getattr, -}; - static struct inode_operations ramfs_dir_inode_operations = { .create = ramfs_create, .lookup = simple_lookup, diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h new file mode 100644 index 0000000..272c8a7 --- /dev/null +++ b/fs/ramfs/internal.h @@ -0,0 +1,15 @@ +/* internal.h: ramfs internal definitions + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
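ramfs_nommu_get_unmapped_area() above can only honour MAP_SHARED when the file's pages are physically consecutive, and it detects that through pointer arithmetic on struct page: because mem_map is an array, adjacent page frames have adjacent page structs. The test in isolation, as a hedged sketch:

/* true if the npages page structs describe one physically
 * contiguous run of memory */
static int pages_are_adjacent(struct page **pages, unsigned long npages)
{
	unsigned long i;

	for (i = 1; i < npages; i++)
		if (pages[i] != pages[0] + i)
			return 0;
	return 1;
}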
+ */ + + +extern struct address_space_operations ramfs_aops; +extern struct file_operations ramfs_file_operations; +extern struct inode_operations ramfs_file_inode_operations; diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h index 6183eab..cb03bbe 100644 --- a/include/asm-alpha/atomic.h +++ b/include/asm-alpha/atomic.h @@ -216,4 +216,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#include <asm-generic/atomic.h> #endif /* _ALPHA_ATOMIC_H */ diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h index 680f7ec..9dc7256 100644 --- a/include/asm-alpha/dma-mapping.h +++ b/include/asm-alpha/dma-mapping.h @@ -16,7 +16,7 @@ #define dma_free_coherent(dev, size, va, addr) \ pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr) #define dma_map_page(dev, page, off, size, dir) \ - pci_map_single(alpha_gendev_to_pci(dev), page, off, size, dir) + pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir) #define dma_unmap_page(dev, addr, size, dir) \ pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir) #define dma_map_sg(dev, sg, nents, dir) \ diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h index c0593f9..7bb6a36 100644 --- a/include/asm-alpha/hardirq.h +++ b/include/asm-alpha/hardirq.h @@ -13,6 +13,8 @@ typedef struct { #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +void ack_bad_irq(unsigned int irq); + #define HARDIRQ_BITS 12 /* diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h index eb9c279..f643953 100644 --- a/include/asm-alpha/mman.h +++ b/include/asm-alpha/mman.h @@ -42,6 +42,7 @@ #define MADV_WILLNEED 3 /* will need these pages */ #define MADV_SPACEAVAIL 5 /* ensure resources are available */ #define MADV_DONTNEED 6 /* don't need these pages */ +#define MADV_REMOVE 7 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h index d586f65..f72b633 100644 --- a/include/asm-arm/atomic.h +++ b/include/asm-arm/atomic.h @@ -205,5 +205,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif #endif diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h index 8e4f69c..f0bebca 100644 --- a/include/asm-arm/mman.h +++ b/include/asm-arm/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h index a47cadc..3074b0e 100644 --- a/include/asm-arm26/atomic.h +++ b/include/asm-arm26/atomic.h @@ -118,5 +118,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif #endif diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h index cc27b82..0ed7780 100644 --- a/include/asm-arm26/mman.h +++ b/include/asm-arm26/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 
0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h index 683b05a..2df2c7a 100644 --- a/include/asm-cris/atomic.h +++ b/include/asm-cris/atomic.h @@ -156,4 +156,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h index 8570e72..5a382b8 100644 --- a/include/asm-cris/mman.h +++ b/include/asm-cris/mman.h @@ -37,6 +37,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index f6539ff..3f54fea 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -426,4 +426,5 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#include <asm-generic/atomic.h> #endif /* _ASM_ATOMIC_H */ diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h index 9feff4c..fca9d90 100644 --- a/include/asm-frv/futex.h +++ b/include/asm-frv/futex.h @@ -7,47 +7,7 @@ #include <asm/errno.h> #include <asm/uaccess.h> -static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret; - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - inc_preempt_count(); - - switch (op) { - case FUTEX_OP_SET: - case FUTEX_OP_ADD: - case FUTEX_OP_OR: - case FUTEX_OP_ANDN: - case FUTEX_OP_XOR: - default: - ret = -ENOSYS; - } - - dec_preempt_count(); - - if (!ret) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} +extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); #endif #endif diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h index c684720..8af4a41 100644 --- a/include/asm-frv/mman.h +++ b/include/asm-frv/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h index d407bde..6736689 100644 --- a/include/asm-frv/signal.h +++ b/include/asm-frv/signal.h @@ -151,7 +151,6 @@ typedef struct sigaltstack { size_t ss_size; } stack_t; -extern int do_signal(struct pt_regs *regs, sigset_t *oldset); #define ptrace_signal_deliver(regs, cookie) do { } while (0) #ifdef __KERNEL__ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h new file mode 100644 index 0000000..e0a28b9 --- /dev/null +++ b/include/asm-generic/atomic.h @@ -0,0 +1,116 @@ +#ifndef _ASM_GENERIC_ATOMIC_H +#define _ASM_GENERIC_ATOMIC_H +/* + * Copyright (C) 2005 Silicon Graphics, Inc. + * Christoph Lameter <clameter@sgi.com> + * + * Allows providing arch-independent atomic definitions without the need to + * edit all arch-specific atomic.h files. + */ + + +/* + * Support for atomic_long_t + * + * Casts for parameters are avoided for existing atomic functions in order to + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the + * macros of a platform may have. + */
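The header that follows gives callers a counter that is always the width of a machine word, without per-use ifdefs. A minimal usage sketch (the counter name is illustrative only):

/* a statistics counter: 32-bit on 32-bit arches, 64-bit on 64-bit ones */
static atomic_long_t nr_widgets = ATOMIC_LONG_INIT(0);

static void widget_created(void)
{
	atomic_long_inc(&nr_widgets);
}

static long widgets_outstanding(void)
{
	return atomic_long_read(&nr_widgets);
}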
+ +#if BITS_PER_LONG == 64 + +typedef atomic64_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_sub(i, v); +} + +#else + +typedef atomic_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic_t *v = (atomic_t *)l; + + atomic_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_sub(i, v); +} + +#endif +#endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 094d491..35de20c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -10,6 +10,8 @@ #define ALIGN_FUNCTION() . = ALIGN(8) #define RODATA \ + . = ALIGN(4096); \ + __start_rodata = .; \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ *(.rodata) *(.rodata.*) \ *(__vermagic) /* Kernel version magic */ \ @@ -74,6 +76,8 @@ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ *(__ksymtab_strings) \ } \ + __end_rodata = .; \ + . = ALIGN(4096); \ \ /* Built-in module parameters.
*/ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h index f23d868..d891541 100644 --- a/include/asm-h8300/atomic.h +++ b/include/asm-h8300/atomic.h @@ -137,4 +137,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h index 5027181..73065f5 100644 --- a/include/asm-h8300/irq.h +++ b/include/asm-h8300/irq.h @@ -61,11 +61,6 @@ static __inline__ int irq_canonicalize(int irq) extern void enable_irq(unsigned int); extern void disable_irq(unsigned int); - -/* - * Some drivers want these entry points - */ -#define enable_irq_nosync(x) enable_irq(x) #define disable_irq_nosync(x) disable_irq(x) struct irqaction; diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h index 63f727a..744a8fb 100644 --- a/include/asm-h8300/mman.h +++ b/include/asm-h8300/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index c68557a..7a5472d 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -254,4 +254,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \ #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 4807aa1..65679ac 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h @@ -332,9 +332,9 @@ static inline unsigned long __ffs(unsigned long word) * Returns the bit-number of the first set bit, not the number of the byte * containing a bit. */ -static inline int find_first_bit(const unsigned long *addr, unsigned size) +static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) { - int x = 0; + unsigned x = 0; while (x < size) { unsigned long val = *addr++; @@ -367,11 +367,6 @@ static inline unsigned long ffz(unsigned long word) return word; } -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) #define fls64(x) generic_fls64(x) #ifdef __KERNEL__ @@ -415,6 +410,23 @@ static inline int ffs(int x) } /** + * fls - find last bit set + * @x: the word to search + * + * This is defined the same way as ffs. + */ +static inline int fls(int x) +{ + int r; + + __asm__("bsrl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return r+1; +} + +/** * hweightN - returns the hamming weight of a N-bit word * @x: the word to weigh * diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h index ea54540..50233e0 100644 --- a/include/asm-i386/bugs.h +++ b/include/asm-i386/bugs.h @@ -8,9 +8,6 @@ * <rreilova@ececs.uc.edu> * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). - * - * Pentium III FXSR, SSE support - * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* @@ -76,25 +73,7 @@ static void __init check_fpu(void) return; } -/* Enable FXSR and company _before_ testing for FP problems. */ - /* - * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. 
- */ - if (offsetof(struct task_struct, thread.i387.fxsave) & 15) { - extern void __buggy_fxsr_alignment(void); - __buggy_fxsr_alignment(); - } - if (cpu_has_fxsr) { - printk(KERN_INFO "Enabling fast FPU save and restore... "); - set_in_cr4(X86_CR4_OSFXSR); - printk("done.\n"); - } - if (cpu_has_xmm) { - printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... "); - set_in_cr4(X86_CR4_OSXMMEXCPT); - printk("done.\n"); - } - +/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */ /* Test for the divl bug.. */ __asm__("fninit\n\t" "fldl %1\n\t" diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h index 2ea36de..7199f7b 100644 --- a/include/asm-i386/cacheflush.h +++ b/include/asm-i386/cacheflush.h @@ -31,4 +31,8 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot); void kernel_map_pages(struct page *page, int numpages, int enable); #endif +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +#endif + #endif /* _I386_CACHEFLUSH_H */ diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h index 29b851a..494e73b 100644 --- a/include/asm-i386/desc.h +++ b/include/asm-i386/desc.h @@ -15,9 +15,6 @@ #include <asm/mmu.h> extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; -DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); - -#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu)) DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); @@ -29,6 +26,11 @@ struct Xgt_desc_struct { extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS]; +static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) +{ + return ((struct desc_struct *)cpu_gdt_descr[cpu].address); +} + #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h index ba936d4..18b19a7 100644 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ b/include/asm-i386/mach-bigsmp/mach_apic.h @@ -1,17 +1,10 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H -#include <asm/smp.h> - -#define SEQUENTIAL_APICID -#ifdef SEQUENTIAL_APICID -#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\ - ((phys_apic<<2) & (~0xf)) ) -#elif CLUSTERED_APICID -#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\ - ((phys_apic) & (~0xf)) ) -#endif - -#define NO_BALANCE_IRQ (1) + + +extern u8 bios_cpu_apicid[]; + +#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) #define esr_disable (1) static inline int apic_id_registered(void) @@ -19,7 +12,6 @@ static inline int apic_id_registered(void) return (1); } -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) /* Round robin the irqs amoung the online cpus */ static inline cpumask_t target_cpus(void) { @@ -32,29 +24,34 @@ static inline cpumask_t target_cpus(void) } while (cpu >= NR_CPUS); return cpumask_of_cpu(cpu); } -#define TARGET_CPUS (target_cpus()) -#define INT_DELIVERY_MODE dest_Fixed -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ +#undef APIC_DEST_LOGICAL +#define APIC_DEST_LOGICAL 0 +#define TARGET_CPUS (target_cpus()) +#define APIC_DFR_VALUE (APIC_DFR_FLAT) +#define INT_DELIVERY_MODE (dest_Fixed) +#define INT_DEST_MODE (0) /* phys delivery to target proc */ +#define NO_BALANCE_IRQ (0) +#define WAKE_SECONDARY_VIA_INIT + static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { - return 0; 
+ return (0); } -/* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long check_apicid_present(int bit) { - return 1; + return (1); } -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline unsigned long calculate_ldr(unsigned long old) +static inline unsigned long calculate_ldr(int cpu) { - unsigned long id; - id = xapic_phys_to_log_apicid(hard_smp_processor_id()); - return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id)); + unsigned long val, id; + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + id = xapic_phys_to_log_apicid(cpu); + val |= SET_APIC_LOGICAL_ID(id); + return val; } /* @@ -67,37 +64,35 @@ static inline unsigned long calculate_ldr(unsigned long old) static inline void init_apic_ldr(void) { unsigned long val; + int cpu = smp_processor_id(); apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val = calculate_ldr(val); + val = calculate_ldr(cpu); apic_write_around(APIC_LDR, val); } static inline void clustered_apic_check(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "Cluster", nr_ioapics); + "Physflat", nr_ioapics); } static inline int multi_timer_check(int apic, int irq) { - return 0; + return (0); } static inline int apicid_to_node(int logical_apicid) { - return 0; + return (0); } -extern u8 bios_cpu_apicid[]; - static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; + return (int) bios_cpu_apicid[mps_cpu]; + + return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) @@ -109,10 +104,10 @@ extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; - } + if (cpu >= NR_CPUS) + return BAD_APICID; + return cpu_physical_id(cpu); +} static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) @@ -128,11 +123,9 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xFUL); + return physids_promote(0xFFL); } -#define WAKE_SECONDARY_VIA_INIT - static inline void setup_portio_remap(void) { } diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h index 23e58b3..a58ab5a 100644 --- a/include/asm-i386/mach-bigsmp/mach_apicdef.h +++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h @@ -1,11 +1,11 @@ #ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H -#define APIC_ID_MASK (0x0F<<24) +#define APIC_ID_MASK (0xFF<<24) static inline unsigned get_apic_id(unsigned long x) { - return (((x)>>24)&0x0F); + return (((x)>>24)&0xFF); } #define GET_APIC_ID(x) get_apic_id(x) diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h index 196619a..ba4941e 100644 --- a/include/asm-i386/mman.h +++ b/include/asm-i386/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index 
620a906..74f595d 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h @@ -76,11 +76,6 @@ static inline int pfn_to_nid(unsigned long pfn) * Following are macros that each numa implmentation must define. */ -/* - * Given a kernel address, find the home node of the underlying memory. - */ -#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) - #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h index eb7f2b4..424661d 100644 --- a/include/asm-i386/module.h +++ b/include/asm-i386/module.h @@ -52,8 +52,10 @@ struct mod_arch_specific #define MODULE_PROC_FAMILY "CYRIXIII " #elif defined CONFIG_MVIAC3_2 #define MODULE_PROC_FAMILY "VIAC3-2 " -#elif CONFIG_MGEODEGX1 +#elif defined CONFIG_MGEODEGX1 #define MODULE_PROC_FAMILY "GEODEGX1 " +#elif defined CONFIG_MGEODE_LX +#define MODULE_PROC_FAMILY "GEODE " #else #error unknown processor family #endif diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h index a961093..76feedf 100644 --- a/include/asm-i386/mpspec_def.h +++ b/include/asm-i386/mpspec_def.h @@ -75,7 +75,7 @@ struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; - unsigned char mpc_bustype[6] __attribute((packed)); + unsigned char mpc_bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. */ diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h index bb5ff5b..faf9953 100644 --- a/include/asm-i386/segment.h +++ b/include/asm-i386/segment.h @@ -91,6 +91,20 @@ #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) +/* The PnP BIOS entries in the GDT */ +#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) +#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) +#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) +#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) +#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) + +/* The PnP BIOS selectors */ +#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ +#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ +#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ +#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ +#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ + /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 772f85d..9c0593b 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -54,23 +54,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ ); } while(0) #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) -#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 ) - -static inline unsigned long _get_base(char * addr) -{ - unsigned long __base; - __asm__("movb %3,%%dh\n\t" - "movb %2,%%dl\n\t" - "shll $16,%%edx\n\t" - "movw %1,%%dx" - :"=&d" (__base) - :"m" (*((addr)+2)), - "m" (*((addr)+4)), - "m" (*((addr)+7))); - return __base; -} - -#define get_base(ldt) _get_base( ((char *)&(ldt)) ) +#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) /* * Load a segment. 
Fall back on loading the zero @@ -140,6 +124,19 @@ static inline unsigned long _get_base(char * addr) :"=r" (__dummy)); \ __dummy; \ }) + +#define read_cr4_safe() ({ \ + unsigned int __dummy; \ + /* This could fault if %cr4 does not exist */ \ + __asm__("1: movl %%cr4, %0 \n" \ + "2: \n" \ + ".section __ex_table,\"a\" \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + : "=r" (__dummy): "0" (0)); \ + __dummy; \ +}) + #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)); #define stts() write_cr0(8 | read_cr0()) diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index 0f92e78..fe38b9a 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -256,7 +256,7 @@ #define __NR_io_submit 248 #define __NR_io_cancel 249 #define __NR_fadvise64 250 -#define __NR_set_zone_reclaim 251 +/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ #define __NR_exit_group 252 #define __NR_lookup_dcookie 253 #define __NR_epoll_create 254 diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 2fbebf8..15cf798 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -192,4 +192,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h index 1c0a73a..828beb2 100644 --- a/include/asm-ia64/mman.h +++ b/include/asm-ia64/mman.h @@ -43,6 +43,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 6d96a67..2bf5434 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h @@ -265,7 +265,7 @@ #define __NR_keyctl 1273 #define __NR_ioprio_set 1274 #define __NR_ioprio_get 1275 -#define __NR_set_zone_reclaim 1276 +/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */ #define __NR_inotify_init 1277 #define __NR_inotify_add_watch 1278 #define __NR_inotify_rm_watch 1279 diff --git a/include/asm-m32r/assembler.h b/include/asm-m32r/assembler.h index e1dff9d..b7f4d8a 100644 --- a/include/asm-m32r/assembler.h +++ b/include/asm-m32r/assembler.h @@ -52,7 +52,7 @@ or3 \reg, \reg, #low(\x) .endm -#if !defined(CONFIG_CHIP_M32102) +#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) #define STI(reg) STI_M reg .macro STI_M reg setpsw #0x40 -> nop @@ -64,7 +64,7 @@ clrpsw #0x40 -> nop ; WORKAROUND: "-> nop" is a workaround for the M32700(TS1). 
.endm -#else /* CONFIG_CHIP_M32102 */ +#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ #define STI(reg) STI_M reg .macro STI_M reg mvfc \reg, psw @@ -191,12 +191,12 @@ and \reg, sp .endm -#if !defined(CONFIG_CHIP_M32102) +#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) .macro SWITCH_TO_KERNEL_STACK ; switch to kernel stack (spi) clrpsw #0x80 -> nop .endm -#else /* CONFIG_CHIP_M32102 */ +#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ .macro SWITCH_TO_KERNEL_STACK push r0 ; save r0 for working mvfc r0, psw @@ -218,7 +218,7 @@ .fillinsn 2: .endm -#endif /* CONFIG_CHIP_M32102 */ +#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h index ef1fb8e..7076127 100644 --- a/include/asm-m32r/atomic.h +++ b/include/asm-m32r/atomic.h @@ -313,4 +313,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h index 46fc4c3..e57427b 100644 --- a/include/asm-m32r/cacheflush.h +++ b/include/asm-m32r/cacheflush.h @@ -7,7 +7,7 @@ extern void _flush_cache_all(void); extern void _flush_cache_copyback_all(void); -#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) +#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) diff --git a/include/asm-m32r/irq.h b/include/asm-m32r/irq.h index 8ed7796..ca94395 100644 --- a/include/asm-m32r/irq.h +++ b/include/asm-m32r/irq.h @@ -65,6 +65,22 @@ #define NR_IRQS \ (OPSPUT_NUM_CPU_IRQ + OPSPUT_NUM_PLD_IRQ \ + OPSPUT_NUM_LCD_PLD_IRQ + OPSPUT_NUM_LAN_PLD_IRQ) + +#elif defined(CONFIG_PLAT_M32104UT) +/* + * IRQ definitions for M32104UT + * M32104 Chip: 64 interrupts + * ICU of M32104UT-on-board PLD: 32 interrupts cascaded to INT1# chip pin + */ +#define M32104UT_NUM_CPU_IRQ (64) +#define M32104UT_NUM_PLD_IRQ (32) +#define M32104UT_IRQ_BASE 0 +#define M32104UT_CPU_IRQ_BASE M32104UT_IRQ_BASE +#define M32104UT_PLD_IRQ_BASE (M32104UT_CPU_IRQ_BASE + M32104UT_NUM_CPU_IRQ) + +#define NR_IRQS \ + (M32104UT_NUM_CPU_IRQ + M32104UT_NUM_PLD_IRQ) + #else #define NR_IRQS 64 #endif diff --git a/include/asm-m32r/m32102.h b/include/asm-m32r/m32102.h index cb98101..a1f0d1f 100644 --- a/include/asm-m32r/m32102.h +++ b/include/asm-m32r/m32102.h @@ -11,7 +11,11 @@ /*======================================================================* * Special Function Register *======================================================================*/ +#if !defined(CONFIG_CHIP_M32104) #define M32R_SFR_OFFSET (0x00E00000) /* 0x00E00000-0x00EFFFFF 1[MB] */ +#else +#define M32R_SFR_OFFSET (0x00700000) /* 0x00700000-0x007FFFFF 1[MB] */ +#endif /* * Clock and Power Management registers. 
@@ -100,7 +104,7 @@ #define M32R_MFT5RLD_PORTL (0x0C+M32R_MFT5_OFFSET) /* MFT4 reload */ #define M32R_MFT5CMPRLD_PORTL (0x10+M32R_MFT5_OFFSET) /* MFT4 compare reload */ -#ifdef CONFIG_CHIP_M32700 +#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32104) #define M32R_MFTCR_MFT0MSK (1UL<<31) /* b0 */ #define M32R_MFTCR_MFT1MSK (1UL<<30) /* b1 */ #define M32R_MFTCR_MFT2MSK (1UL<<29) /* b2 */ @@ -113,7 +117,7 @@ #define M32R_MFTCR_MFT3EN (1UL<<20) /* b11 */ #define M32R_MFTCR_MFT4EN (1UL<<19) /* b12 */ #define M32R_MFTCR_MFT5EN (1UL<<18) /* b13 */ -#else /* not CONFIG_CHIP_M32700 */ +#else /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */ #define M32R_MFTCR_MFT0MSK (1UL<<15) /* b16 */ #define M32R_MFTCR_MFT1MSK (1UL<<14) /* b17 */ #define M32R_MFTCR_MFT2MSK (1UL<<13) /* b18 */ @@ -126,7 +130,7 @@ #define M32R_MFTCR_MFT3EN (1UL<<4) /* b27 */ #define M32R_MFTCR_MFT4EN (1UL<<3) /* b28 */ #define M32R_MFTCR_MFT5EN (1UL<<2) /* b29 */ -#endif /* not CONFIG_CHIP_M32700 */ +#endif /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */ #define M32R_MFTMOD_CC_MASK (1UL<<15) /* b16 */ #define M32R_MFTMOD_TCCR (1UL<<13) /* b18 */ @@ -241,8 +245,24 @@ #define M32R_IRQ_MFT1 (17) /* MFT1 */ #define M32R_IRQ_MFT2 (18) /* MFT2 */ #define M32R_IRQ_MFT3 (19) /* MFT3 */ -#define M32R_IRQ_MFT4 (20) /* MFT4 */ -#define M32R_IRQ_MFT5 (21) /* MFT5 */ +#ifdef CONFIG_CHIP_M32104 +#define M32R_IRQ_MFTX0 (24) /* MFTX0 */ +#define M32R_IRQ_MFTX1 (25) /* MFTX1 */ +#define M32R_IRQ_DMA0 (32) /* DMA0 */ +#define M32R_IRQ_DMA1 (33) /* DMA1 */ +#define M32R_IRQ_DMA2 (34) /* DMA2 */ +#define M32R_IRQ_DMA3 (35) /* DMA3 */ +#define M32R_IRQ_SIO0_R (40) /* SIO0 send */ +#define M32R_IRQ_SIO0_S (41) /* SIO0 receive */ +#define M32R_IRQ_SIO1_R (42) /* SIO1 send */ +#define M32R_IRQ_SIO1_S (43) /* SIO1 receive */ +#define M32R_IRQ_SIO2_R (44) /* SIO2 send */ +#define M32R_IRQ_SIO2_S (45) /* SIO2 receive */ +#define M32R_IRQ_SIO3_R (46) /* SIO3 send */ +#define M32R_IRQ_SIO3_S (47) /* SIO3 receive */ +#define M32R_IRQ_ADC (56) /* ADC */ +#define M32R_IRQ_PC (57) /* PC */ +#else /* ! M32104 */ #define M32R_IRQ_DMA0 (32) /* DMA0 */ #define M32R_IRQ_DMA1 (33) /* DMA1 */ #define M32R_IRQ_SIO0_R (48) /* SIO0 send */ @@ -255,6 +275,7 @@ #define M32R_IRQ_SIO3_S (55) /* SIO3 receive */ #define M32R_IRQ_SIO4_R (56) /* SIO4 send */ #define M32R_IRQ_SIO4_S (57) /* SIO4 receive */ +#endif /* ! M32104 */ #ifdef CONFIG_SMP #define M32R_IRQ_IPI0 (56) @@ -281,15 +302,12 @@ #define M32R_FPGA_VERSION0_PORTL (0x30+M32R_FPGA_TOP) #define M32R_FPGA_VERSION1_PORTL (0x34+M32R_FPGA_TOP) +#endif /* CONFIG_SMP */ + #ifndef __ASSEMBLY__ -/* For NETDEV WATCHDOG */ typedef struct { unsigned long icucr; /* ICU Control Register */ } icu_data_t; - -extern icu_data_t icu_data[]; #endif -#endif /* CONFIG_SMP */ - #endif /* _M32102_H_ */ diff --git a/include/asm-m32r/m32104ut/m32104ut_pld.h b/include/asm-m32r/m32104ut/m32104ut_pld.h new file mode 100644 index 0000000..a4eac20 --- /dev/null +++ b/include/asm-m32r/m32104ut/m32104ut_pld.h @@ -0,0 +1,163 @@ +/* + * include/asm/m32104ut/m32104ut_pld.h + * + * Definitions for Programmable Logic Device (PLD) on M32104UT board. + * Based on m32700ut_pld.h + * + * Copyright (c) 2002 Takeo Takahashi + * Copyright (c) 2005 Naoto Sugai + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details.
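+ * + * A minimal usage sketch (illustrative only; the register macros used + * here are defined later in this file, and wp is a hypothetical local): + * + * *PLD_IOLEDCR = PLD_IOLED_1_ON; turn on LED 1 + * wp = !(*PLD_IOSWSTS & PLD_IOSWSTS_IOWP0); flash write-protected?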
+ */ + +#ifndef _M32104UT_M32104UT_PLD_H +#define _M32104UT_M32104UT_PLD_H + +#include <linux/config.h> + +#if defined(CONFIG_PLAT_M32104UT) +#define PLD_PLAT_BASE 0x02c00000 +#else +#error "no platform configuration" +#endif + +#ifndef __ASSEMBLY__ +/* + * C functions use non-cache address. + */ +#define PLD_BASE (PLD_PLAT_BASE /* + NONCACHE_OFFSET */) +#define __reg8 (volatile unsigned char *) +#define __reg16 (volatile unsigned short *) +#define __reg32 (volatile unsigned int *) +#else +#define PLD_BASE (PLD_PLAT_BASE + NONCACHE_OFFSET) +#define __reg8 +#define __reg16 +#define __reg32 +#endif /* __ASSEMBLY__ */ + +/* CFC */ +#define PLD_CFRSTCR __reg16(PLD_BASE + 0x0000) +#define PLD_CFSTS __reg16(PLD_BASE + 0x0002) +#define PLD_CFIMASK __reg16(PLD_BASE + 0x0004) +#define PLD_CFBUFCR __reg16(PLD_BASE + 0x0006) + +/* MMC */ +#define PLD_MMCCR __reg16(PLD_BASE + 0x4000) +#define PLD_MMCMOD __reg16(PLD_BASE + 0x4002) +#define PLD_MMCSTS __reg16(PLD_BASE + 0x4006) +#define PLD_MMCBAUR __reg16(PLD_BASE + 0x400a) +#define PLD_MMCCMDBCUT __reg16(PLD_BASE + 0x400c) +#define PLD_MMCCDTBCUT __reg16(PLD_BASE + 0x400e) +#define PLD_MMCDET __reg16(PLD_BASE + 0x4010) +#define PLD_MMCWP __reg16(PLD_BASE + 0x4012) +#define PLD_MMCWDATA __reg16(PLD_BASE + 0x5000) +#define PLD_MMCRDATA __reg16(PLD_BASE + 0x6000) +#define PLD_MMCCMDDATA __reg16(PLD_BASE + 0x7000) +#define PLD_MMCRSPDATA __reg16(PLD_BASE + 0x7006) + +/* ICU + * ICUISTS: status register + * ICUIREQ0: request register + * ICUIREQ1: request register + * ICUCR3: control register for CFIREQ# interrupt + * ICUCR4: control register for CFC Card insert interrupt + * ICUCR5: control register for CFC Card eject interrupt + * ICUCR6: control register for external interrupt + * ICUCR11: control register for MMC Card insert/eject interrupt + * ICUCR13: control register for SC error interrupt + * ICUCR14: control register for SC receive interrupt + * ICUCR15: control register for SC send interrupt + */ + +#define PLD_IRQ_INT0 (M32104UT_PLD_IRQ_BASE + 0) /* None */ +#define PLD_IRQ_CFIREQ (M32104UT_PLD_IRQ_BASE + 3) /* CF IREQ */ +#define PLD_IRQ_CFC_INSERT (M32104UT_PLD_IRQ_BASE + 4) /* CF Insert */ +#define PLD_IRQ_CFC_EJECT (M32104UT_PLD_IRQ_BASE + 5) /* CF Eject */ +#define PLD_IRQ_EXINT (M32104UT_PLD_IRQ_BASE + 6) /* EXINT */ +#define PLD_IRQ_MMCCARD (M32104UT_PLD_IRQ_BASE + 11) /* MMC Insert/Eject */ +#define PLD_IRQ_SC_ERROR (M32104UT_PLD_IRQ_BASE + 13) /* SC error */ +#define PLD_IRQ_SC_RCV (M32104UT_PLD_IRQ_BASE + 14) /* SC receive */ +#define PLD_IRQ_SC_SND (M32104UT_PLD_IRQ_BASE + 15) /* SC send */ + +#define PLD_ICUISTS __reg16(PLD_BASE + 0x8002) +#define PLD_ICUISTS_VECB_MASK (0xf000) +#define PLD_ICUISTS_VECB(x) ((x) & PLD_ICUISTS_VECB_MASK) +#define PLD_ICUISTS_ISN_MASK (0x07c0) +#define PLD_ICUISTS_ISN(x) ((x) & PLD_ICUISTS_ISN_MASK) +#define PLD_ICUCR3 __reg16(PLD_BASE + 0x8104) +#define PLD_ICUCR4 __reg16(PLD_BASE + 0x8106) +#define PLD_ICUCR5 __reg16(PLD_BASE + 0x8108) +#define PLD_ICUCR6 __reg16(PLD_BASE + 0x810a) +#define PLD_ICUCR11 __reg16(PLD_BASE + 0x8114) +#define PLD_ICUCR13 __reg16(PLD_BASE + 0x8118) +#define PLD_ICUCR14 __reg16(PLD_BASE + 0x811a) +#define PLD_ICUCR15 __reg16(PLD_BASE + 0x811c) +#define PLD_ICUCR_IEN (0x1000) +#define PLD_ICUCR_IREQ (0x0100) +#define PLD_ICUCR_ISMOD00 (0x0000) /* Low edge */ +#define PLD_ICUCR_ISMOD01 (0x0010) /* Low level */ +#define PLD_ICUCR_ISMOD02 (0x0020) /* High edge */ +#define PLD_ICUCR_ISMOD03 (0x0030) /* High level */ +#define PLD_ICUCR_ILEVEL0 (0x0000) +#define PLD_ICUCR_ILEVEL1 
(0x0001) +#define PLD_ICUCR_ILEVEL2 (0x0002) +#define PLD_ICUCR_ILEVEL3 (0x0003) +#define PLD_ICUCR_ILEVEL4 (0x0004) +#define PLD_ICUCR_ILEVEL5 (0x0005) +#define PLD_ICUCR_ILEVEL6 (0x0006) +#define PLD_ICUCR_ILEVEL7 (0x0007) + +/* Power Control of MMC and CF */ +#define PLD_CPCR __reg16(PLD_BASE + 0x14000) +#define PLD_CPCR_CDP 0x0001 + +/* LED Control + * + * 1: DIP switch side + * 2: Reset switch side + */ +#define PLD_IOLEDCR __reg16(PLD_BASE + 0x14002) +#define PLD_IOLED_1_ON 0x001 +#define PLD_IOLED_1_OFF 0x000 +#define PLD_IOLED_2_ON 0x002 +#define PLD_IOLED_2_OFF 0x000 + +/* DIP Switch + * 0: Write-protect of Flash Memory (0:protected, 1:non-protected) + * 1: - + * 2: - + * 3: - + */ +#define PLD_IOSWSTS __reg16(PLD_BASE + 0x14004) +#define PLD_IOSWSTS_IOSW2 0x0200 +#define PLD_IOSWSTS_IOSW1 0x0100 +#define PLD_IOSWSTS_IOWP0 0x0001 + +/* CRC */ +#define PLD_CRC7DATA __reg16(PLD_BASE + 0x18000) +#define PLD_CRC7INDATA __reg16(PLD_BASE + 0x18002) +#define PLD_CRC16DATA __reg16(PLD_BASE + 0x18004) +#define PLD_CRC16INDATA __reg16(PLD_BASE + 0x18006) +#define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008) +#define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a) + +/* RTC */ +#define PLD_RTCCR __reg16(PLD_BASE + 0x1c000) +#define PLD_RTCBAUR __reg16(PLD_BASE + 0x1c002) +#define PLD_RTCWRDATA __reg16(PLD_BASE + 0x1c004) +#define PLD_RTCRDDATA __reg16(PLD_BASE + 0x1c006) +#define PLD_RTCRSTODT __reg16(PLD_BASE + 0x1c008) + +/* SIM Card */ +#define PLD_SCCR __reg16(PLD_BASE + 0x38000) +#define PLD_SCMOD __reg16(PLD_BASE + 0x38004) +#define PLD_SCSTS __reg16(PLD_BASE + 0x38006) +#define PLD_SCINTCR __reg16(PLD_BASE + 0x38008) +#define PLD_SCBAUR __reg16(PLD_BASE + 0x3800a) +#define PLD_SCTXB __reg16(PLD_BASE + 0x3800c) +#define PLD_SCRXB __reg16(PLD_BASE + 0x3800e) + +#endif /* _M32104UT_M32104UT_PLD_H */ diff --git a/include/asm-m32r/m32r.h b/include/asm-m32r/m32r.h index ec142be..b133ca6 100644 --- a/include/asm-m32r/m32r.h +++ b/include/asm-m32r/m32r.h @@ -14,7 +14,7 @@ #include <asm/m32r_mp_fpga.h> #elif defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ - || defined(CONFIG_CHIP_OPSP) + || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) #include <asm/m32102.h> #endif @@ -43,6 +43,10 @@ #include <asm/m32700ut/m32700ut_pld.h> #endif +#if defined(CONFIG_PLAT_M32104UT) +#include <asm/m32104ut/m32104ut_pld.h> +#endif /* CONFIG_PLAT_M32104UT */ + /* * M32R Register */ @@ -122,7 +126,7 @@ #include <asm/page.h> #ifdef CONFIG_MMU -#define NONCACHE_OFFSET __PAGE_OFFSET+0x20000000 +#define NONCACHE_OFFSET (__PAGE_OFFSET + 0x20000000) #else #define NONCACHE_OFFSET __PAGE_OFFSET #endif /* CONFIG_MMU */ diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h index 011f6d9..12e2974 100644 --- a/include/asm-m32r/mman.h +++ b/include/asm-m32r/mman.h @@ -37,6 +37,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 5eee832..dcf619a 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -69,12 +69,12 @@ } while(0) /* Interrupt Control */ -#if !defined(CONFIG_CHIP_M32102) +#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) #define local_irq_enable() \ __asm__ __volatile__ ("setpsw #0x40 -> nop": :
:"memory") #define local_irq_disable() \ __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") -#else /* CONFIG_CHIP_M32102 */ +#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ static inline void local_irq_enable(void) { unsigned long tmpreg; @@ -96,7 +96,7 @@ static inline void local_irq_disable(void) "mvtc %0, psw \n\t" : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); } -#endif /* CONFIG_CHIP_M32102 */ +#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ #define local_save_flags(x) \ __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) @@ -105,13 +105,13 @@ static inline void local_irq_disable(void) __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ : "r" (x) : "cbit", "memory") -#if !defined(CONFIG_CHIP_M32102) +#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) #define local_irq_save(x) \ __asm__ __volatile__( \ "mvfc %0, psw; \n\t" \ "clrpsw #0x40 -> nop; \n\t" \ : "=r" (x) : /* no input */ : "memory") -#else /* CONFIG_CHIP_M32102 */ +#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ #define local_irq_save(x) \ ({ \ unsigned long tmpreg; \ @@ -124,7 +124,7 @@ static inline void local_irq_disable(void) : "=r" (x), "=&r" (tmpreg) \ : : "cbit", "memory"); \ }) -#endif /* CONFIG_CHIP_M32102 */ +#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ #define irqs_disabled() \ ({ \ diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h index ac399e1..39be87c 100644 --- a/include/asm-m32r/unistd.h +++ b/include/asm-m32r/unistd.h @@ -319,7 +319,7 @@ type name(void) \ register long __scno __asm__ ("r7") = __NR_##name; \ register long __res __asm__("r0"); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno) \ : "memory"); \ @@ -332,7 +332,7 @@ type name(type1 arg1) \ register long __scno __asm__ ("r7") = __NR_##name; \ register long __res __asm__ ("r0") = (long)(arg1); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno), "0" (__res) \ : "memory"); \ @@ -346,7 +346,7 @@ register long __scno __asm__ ("r7") = __NR_##name; \ register long __arg2 __asm__ ("r1") = (long)(arg2); \ register long __res __asm__ ("r0") = (long)(arg1); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno), "0" (__res), "r" (__arg2) \ : "memory"); \ @@ -361,7 +361,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \ register long __arg2 __asm__ ("r1") = (long)(arg2); \ register long __res __asm__ ("r0") = (long)(arg1); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno), "0" (__res), "r" (__arg2), \ "r" (__arg3) \ @@ -378,7 +378,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \ register long __arg2 __asm__ ("r1") = (long)(arg2); \ register long __res __asm__ ("r0") = (long)(arg1); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno), "0" (__res), "r" (__arg2), \ "r" (__arg3), "r" (__arg4) \ @@ -397,7 +397,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \ register long __arg2 __asm__ ("r1") = (long)(arg2); \ register long __res __asm__ ("r0") = (long)(arg1); \ __asm__ __volatile__ (\ - "trap #" SYSCALL_VECTOR \ + "trap #" SYSCALL_VECTOR "|| nop"\ : "=r" (__res) \ : "r" (__scno), "0" (__res), "r" (__arg2), \ "r" (__arg3), "r" (__arg4), "r" (__arg5) \ diff --git a/include/asm-m68k/atomic.h 
b/include/asm-m68k/atomic.h index e3c962e..b8a4e75 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -157,4 +157,5 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __ARCH_M68K_ATOMIC __ */ diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h index 1f56990..127ad19 100644 --- a/include/asm-m68k/irq.h +++ b/include/asm-m68k/irq.h @@ -70,8 +70,6 @@ static __inline__ int irq_canonicalize(int irq) extern void (*enable_irq)(unsigned int); extern void (*disable_irq)(unsigned int); - -#define disable_irq_nosync disable_irq #define enable_irq_nosync enable_irq struct pt_regs; diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h index f831c4e..ea262ab 100644 --- a/include/asm-m68k/mman.h +++ b/include/asm-m68k/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h index 3c1cc15..1702dbe 100644 --- a/include/asm-m68knommu/atomic.h +++ b/include/asm-m68knommu/atomic.h @@ -143,4 +143,5 @@ static inline int atomic_sub_return(int i, atomic_t * v) #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) +#include <asm-generic/atomic.h> #endif /* __ARCH_M68KNOMMU_ATOMIC __ */ diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h index a08fa9b..20c48ec 100644 --- a/include/asm-m68knommu/irq.h +++ b/include/asm-m68knommu/irq.h @@ -84,10 +84,8 @@ extern void (*mach_disable_irq)(unsigned int); /* * Some drivers want these entry points */ -#define enable_irq(x) (mach_enable_irq ? (*mach_enable_irq)(x) : 0) -#define disable_irq(x) (mach_disable_irq ? (*mach_disable_irq)(x) : 0) - -#define enable_irq_nosync(x) enable_irq(x) +#define enable_irq(x) 0 +#define disable_irq(x) do { } while (0) #define disable_irq_nosync(x) disable_irq(x) struct irqaction; diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index 55c37c1..92256e4 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -713,4 +713,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#include <asm-generic/atomic.h> #endif /* _ASM_ATOMIC_H */ diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h index 6206095..dd17c8b 100644 --- a/include/asm-mips/mman.h +++ b/include/asm-mips/mman.h @@ -65,6 +65,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-mips/riscos-syscall.h b/include/asm-mips/riscos-syscall.h deleted file mode 100644 index 4d8eb15..0000000 --- a/include/asm-mips/riscos-syscall.h +++ /dev/null @@ -1,979 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle - */ -#ifndef _ASM_RISCOS_SYSCALL_H -#define _ASM_RISCOS_SYSCALL_H - -/* - * The syscalls 0 - 3999 are reserved for a down to the root syscall - * compatibility with RISC/os and IRIX. We'll see how to deal with the - * various "real" BSD variants like Ultrix, NetBSD ... - */ - -/* - * SVR4 syscalls are in the range from 1 to 999 - */ -#define __NR_SVR4 0 -#define __NR_SVR4_syscall (__NR_SVR4 + 0) -#define __NR_SVR4_exit (__NR_SVR4 + 1) -#define __NR_SVR4_fork (__NR_SVR4 + 2) -#define __NR_SVR4_read (__NR_SVR4 + 3) -#define __NR_SVR4_write (__NR_SVR4 + 4) -#define __NR_SVR4_open (__NR_SVR4 + 5) -#define __NR_SVR4_close (__NR_SVR4 + 6) -#define __NR_SVR4_wait (__NR_SVR4 + 7) -#define __NR_SVR4_creat (__NR_SVR4 + 8) -#define __NR_SVR4_link (__NR_SVR4 + 9) -#define __NR_SVR4_unlink (__NR_SVR4 + 10) -#define __NR_SVR4_exec (__NR_SVR4 + 11) -#define __NR_SVR4_chdir (__NR_SVR4 + 12) -#define __NR_SVR4_gtime (__NR_SVR4 + 13) -#define __NR_SVR4_mknod (__NR_SVR4 + 14) -#define __NR_SVR4_chmod (__NR_SVR4 + 15) -#define __NR_SVR4_chown (__NR_SVR4 + 16) -#define __NR_SVR4_sbreak (__NR_SVR4 + 17) -#define __NR_SVR4_stat (__NR_SVR4 + 18) -#define __NR_SVR4_lseek (__NR_SVR4 + 19) -#define __NR_SVR4_getpid (__NR_SVR4 + 20) -#define __NR_SVR4_mount (__NR_SVR4 + 21) -#define __NR_SVR4_umount (__NR_SVR4 + 22) -#define __NR_SVR4_setuid (__NR_SVR4 + 23) -#define __NR_SVR4_getuid (__NR_SVR4 + 24) -#define __NR_SVR4_stime (__NR_SVR4 + 25) -#define __NR_SVR4_ptrace (__NR_SVR4 + 26) -#define __NR_SVR4_alarm (__NR_SVR4 + 27) -#define __NR_SVR4_fstat (__NR_SVR4 + 28) -#define __NR_SVR4_pause (__NR_SVR4 + 29) -#define __NR_SVR4_utime (__NR_SVR4 + 30) -#define __NR_SVR4_stty (__NR_SVR4 + 31) -#define __NR_SVR4_gtty (__NR_SVR4 + 32) -#define __NR_SVR4_access (__NR_SVR4 + 33) -#define __NR_SVR4_nice (__NR_SVR4 + 34) -#define __NR_SVR4_statfs (__NR_SVR4 + 35) -#define __NR_SVR4_sync (__NR_SVR4 + 36) -#define __NR_SVR4_kill (__NR_SVR4 + 37) -#define __NR_SVR4_fstatfs (__NR_SVR4 + 38) -#define __NR_SVR4_setpgrp (__NR_SVR4 + 39) -#define __NR_SVR4_cxenix (__NR_SVR4 + 40) -#define __NR_SVR4_dup (__NR_SVR4 + 41) -#define __NR_SVR4_pipe (__NR_SVR4 + 42) -#define __NR_SVR4_times (__NR_SVR4 + 43) -#define __NR_SVR4_profil (__NR_SVR4 + 44) -#define __NR_SVR4_plock (__NR_SVR4 + 45) -#define __NR_SVR4_setgid (__NR_SVR4 + 46) -#define __NR_SVR4_getgid (__NR_SVR4 + 47) -#define __NR_SVR4_sig (__NR_SVR4 + 48) -#define __NR_SVR4_msgsys (__NR_SVR4 + 49) -#define __NR_SVR4_sysmips (__NR_SVR4 + 50) -#define __NR_SVR4_sysacct (__NR_SVR4 + 51) -#define __NR_SVR4_shmsys (__NR_SVR4 + 52) -#define __NR_SVR4_semsys (__NR_SVR4 + 53) -#define __NR_SVR4_ioctl (__NR_SVR4 + 54) -#define __NR_SVR4_uadmin (__NR_SVR4 + 55) -#define __NR_SVR4_exch (__NR_SVR4 + 56) -#define __NR_SVR4_utssys (__NR_SVR4 + 57) -#define __NR_SVR4_fsync (__NR_SVR4 + 58) -#define __NR_SVR4_exece (__NR_SVR4 + 59) -#define __NR_SVR4_umask (__NR_SVR4 + 60) -#define __NR_SVR4_chroot (__NR_SVR4 + 61) -#define __NR_SVR4_fcntl (__NR_SVR4 + 62) -#define __NR_SVR4_ulimit (__NR_SVR4 + 63) -#define __NR_SVR4_reserved1 (__NR_SVR4 + 64) -#define __NR_SVR4_reserved2 (__NR_SVR4 + 65) -#define __NR_SVR4_reserved3 (__NR_SVR4 + 66) -#define __NR_SVR4_reserved4 (__NR_SVR4 + 67) -#define __NR_SVR4_reserved5 (__NR_SVR4 + 68) -#define __NR_SVR4_reserved6 (__NR_SVR4 + 69) -#define __NR_SVR4_advfs (__NR_SVR4 + 70) -#define __NR_SVR4_unadvfs (__NR_SVR4 + 71) -#define __NR_SVR4_unused1 (__NR_SVR4 + 72) -#define __NR_SVR4_unused2 (__NR_SVR4 
+ 73) -#define __NR_SVR4_rfstart (__NR_SVR4 + 74) -#define __NR_SVR4_unused3 (__NR_SVR4 + 75) -#define __NR_SVR4_rdebug (__NR_SVR4 + 76) -#define __NR_SVR4_rfstop (__NR_SVR4 + 77) -#define __NR_SVR4_rfsys (__NR_SVR4 + 78) -#define __NR_SVR4_rmdir (__NR_SVR4 + 79) -#define __NR_SVR4_mkdir (__NR_SVR4 + 80) -#define __NR_SVR4_getdents (__NR_SVR4 + 81) -#define __NR_SVR4_libattach (__NR_SVR4 + 82) -#define __NR_SVR4_libdetach (__NR_SVR4 + 83) -#define __NR_SVR4_sysfs (__NR_SVR4 + 84) -#define __NR_SVR4_getmsg (__NR_SVR4 + 85) -#define __NR_SVR4_putmsg (__NR_SVR4 + 86) -#define __NR_SVR4_poll (__NR_SVR4 + 87) -#define __NR_SVR4_lstat (__NR_SVR4 + 88) -#define __NR_SVR4_symlink (__NR_SVR4 + 89) -#define __NR_SVR4_readlink (__NR_SVR4 + 90) -#define __NR_SVR4_setgroups (__NR_SVR4 + 91) -#define __NR_SVR4_getgroups (__NR_SVR4 + 92) -#define __NR_SVR4_fchmod (__NR_SVR4 + 93) -#define __NR_SVR4_fchown (__NR_SVR4 + 94) -#define __NR_SVR4_sigprocmask (__NR_SVR4 + 95) -#define __NR_SVR4_sigsuspend (__NR_SVR4 + 96) -#define __NR_SVR4_sigaltstack (__NR_SVR4 + 97) -#define __NR_SVR4_sigaction (__NR_SVR4 + 98) -#define __NR_SVR4_sigpending (__NR_SVR4 + 99) -#define __NR_SVR4_setcontext (__NR_SVR4 + 100) -#define __NR_SVR4_evsys (__NR_SVR4 + 101) -#define __NR_SVR4_evtrapret (__NR_SVR4 + 102) -#define __NR_SVR4_statvfs (__NR_SVR4 + 103) -#define __NR_SVR4_fstatvfs (__NR_SVR4 + 104) -#define __NR_SVR4_reserved7 (__NR_SVR4 + 105) -#define __NR_SVR4_nfssys (__NR_SVR4 + 106) -#define __NR_SVR4_waitid (__NR_SVR4 + 107) -#define __NR_SVR4_sigsendset (__NR_SVR4 + 108) -#define __NR_SVR4_hrtsys (__NR_SVR4 + 109) -#define __NR_SVR4_acancel (__NR_SVR4 + 110) -#define __NR_SVR4_async (__NR_SVR4 + 111) -#define __NR_SVR4_priocntlset (__NR_SVR4 + 112) -#define __NR_SVR4_pathconf (__NR_SVR4 + 113) -#define __NR_SVR4_mincore (__NR_SVR4 + 114) -#define __NR_SVR4_mmap (__NR_SVR4 + 115) -#define __NR_SVR4_mprotect (__NR_SVR4 + 116) -#define __NR_SVR4_munmap (__NR_SVR4 + 117) -#define __NR_SVR4_fpathconf (__NR_SVR4 + 118) -#define __NR_SVR4_vfork (__NR_SVR4 + 119) -#define __NR_SVR4_fchdir (__NR_SVR4 + 120) -#define __NR_SVR4_readv (__NR_SVR4 + 121) -#define __NR_SVR4_writev (__NR_SVR4 + 122) -#define __NR_SVR4_xstat (__NR_SVR4 + 123) -#define __NR_SVR4_lxstat (__NR_SVR4 + 124) -#define __NR_SVR4_fxstat (__NR_SVR4 + 125) -#define __NR_SVR4_xmknod (__NR_SVR4 + 126) -#define __NR_SVR4_clocal (__NR_SVR4 + 127) -#define __NR_SVR4_setrlimit (__NR_SVR4 + 128) -#define __NR_SVR4_getrlimit (__NR_SVR4 + 129) -#define __NR_SVR4_lchown (__NR_SVR4 + 130) -#define __NR_SVR4_memcntl (__NR_SVR4 + 131) -#define __NR_SVR4_getpmsg (__NR_SVR4 + 132) -#define __NR_SVR4_putpmsg (__NR_SVR4 + 133) -#define __NR_SVR4_rename (__NR_SVR4 + 134) -#define __NR_SVR4_nuname (__NR_SVR4 + 135) -#define __NR_SVR4_setegid (__NR_SVR4 + 136) -#define __NR_SVR4_sysconf (__NR_SVR4 + 137) -#define __NR_SVR4_adjtime (__NR_SVR4 + 138) -#define __NR_SVR4_sysinfo (__NR_SVR4 + 139) -#define __NR_SVR4_reserved8 (__NR_SVR4 + 140) -#define __NR_SVR4_seteuid (__NR_SVR4 + 141) -#define __NR_SVR4_PYRAMID_statis (__NR_SVR4 + 142) -#define __NR_SVR4_PYRAMID_tuning (__NR_SVR4 + 143) -#define __NR_SVR4_PYRAMID_forcerr (__NR_SVR4 + 144) -#define __NR_SVR4_PYRAMID_mpcntl (__NR_SVR4 + 145) -#define __NR_SVR4_reserved9 (__NR_SVR4 + 146) -#define __NR_SVR4_reserved10 (__NR_SVR4 + 147) -#define __NR_SVR4_reserved11 (__NR_SVR4 + 148) -#define __NR_SVR4_reserved12 (__NR_SVR4 + 149) -#define __NR_SVR4_reserved13 (__NR_SVR4 + 150) -#define __NR_SVR4_reserved14 (__NR_SVR4 + 151) -#define 
__NR_SVR4_reserved15 (__NR_SVR4 + 152) -#define __NR_SVR4_reserved16 (__NR_SVR4 + 153) -#define __NR_SVR4_reserved17 (__NR_SVR4 + 154) -#define __NR_SVR4_reserved18 (__NR_SVR4 + 155) -#define __NR_SVR4_reserved19 (__NR_SVR4 + 156) -#define __NR_SVR4_reserved20 (__NR_SVR4 + 157) -#define __NR_SVR4_reserved21 (__NR_SVR4 + 158) -#define __NR_SVR4_reserved22 (__NR_SVR4 + 159) -#define __NR_SVR4_reserved23 (__NR_SVR4 + 160) -#define __NR_SVR4_reserved24 (__NR_SVR4 + 161) -#define __NR_SVR4_reserved25 (__NR_SVR4 + 162) -#define __NR_SVR4_reserved26 (__NR_SVR4 + 163) -#define __NR_SVR4_reserved27 (__NR_SVR4 + 164) -#define __NR_SVR4_reserved28 (__NR_SVR4 + 165) -#define __NR_SVR4_reserved29 (__NR_SVR4 + 166) -#define __NR_SVR4_reserved30 (__NR_SVR4 + 167) -#define __NR_SVR4_reserved31 (__NR_SVR4 + 168) -#define __NR_SVR4_reserved32 (__NR_SVR4 + 169) -#define __NR_SVR4_reserved33 (__NR_SVR4 + 170) -#define __NR_SVR4_reserved34 (__NR_SVR4 + 171) -#define __NR_SVR4_reserved35 (__NR_SVR4 + 172) -#define __NR_SVR4_reserved36 (__NR_SVR4 + 173) -#define __NR_SVR4_reserved37 (__NR_SVR4 + 174) -#define __NR_SVR4_reserved38 (__NR_SVR4 + 175) -#define __NR_SVR4_reserved39 (__NR_SVR4 + 176) -#define __NR_SVR4_reserved40 (__NR_SVR4 + 177) -#define __NR_SVR4_reserved41 (__NR_SVR4 + 178) -#define __NR_SVR4_reserved42 (__NR_SVR4 + 179) -#define __NR_SVR4_reserved43 (__NR_SVR4 + 180) -#define __NR_SVR4_reserved44 (__NR_SVR4 + 181) -#define __NR_SVR4_reserved45 (__NR_SVR4 + 182) -#define __NR_SVR4_reserved46 (__NR_SVR4 + 183) -#define __NR_SVR4_reserved47 (__NR_SVR4 + 184) -#define __NR_SVR4_reserved48 (__NR_SVR4 + 185) -#define __NR_SVR4_reserved49 (__NR_SVR4 + 186) -#define __NR_SVR4_reserved50 (__NR_SVR4 + 187) -#define __NR_SVR4_reserved51 (__NR_SVR4 + 188) -#define __NR_SVR4_reserved52 (__NR_SVR4 + 189) -#define __NR_SVR4_reserved53 (__NR_SVR4 + 190) -#define __NR_SVR4_reserved54 (__NR_SVR4 + 191) -#define __NR_SVR4_reserved55 (__NR_SVR4 + 192) -#define __NR_SVR4_reserved56 (__NR_SVR4 + 193) -#define __NR_SVR4_reserved57 (__NR_SVR4 + 194) -#define __NR_SVR4_reserved58 (__NR_SVR4 + 195) -#define __NR_SVR4_reserved59 (__NR_SVR4 + 196) -#define __NR_SVR4_reserved60 (__NR_SVR4 + 197) -#define __NR_SVR4_reserved61 (__NR_SVR4 + 198) -#define __NR_SVR4_reserved62 (__NR_SVR4 + 199) -#define __NR_SVR4_reserved63 (__NR_SVR4 + 200) -#define __NR_SVR4_aread (__NR_SVR4 + 201) -#define __NR_SVR4_awrite (__NR_SVR4 + 202) -#define __NR_SVR4_listio (__NR_SVR4 + 203) -#define __NR_SVR4_mips_acancel (__NR_SVR4 + 204) -#define __NR_SVR4_astatus (__NR_SVR4 + 205) -#define __NR_SVR4_await (__NR_SVR4 + 206) -#define __NR_SVR4_areadv (__NR_SVR4 + 207) -#define __NR_SVR4_awritev (__NR_SVR4 + 208) -#define __NR_SVR4_MIPS_reserved1 (__NR_SVR4 + 209) -#define __NR_SVR4_MIPS_reserved2 (__NR_SVR4 + 210) -#define __NR_SVR4_MIPS_reserved3 (__NR_SVR4 + 211) -#define __NR_SVR4_MIPS_reserved4 (__NR_SVR4 + 212) -#define __NR_SVR4_MIPS_reserved5 (__NR_SVR4 + 213) -#define __NR_SVR4_MIPS_reserved6 (__NR_SVR4 + 214) -#define __NR_SVR4_MIPS_reserved7 (__NR_SVR4 + 215) -#define __NR_SVR4_MIPS_reserved8 (__NR_SVR4 + 216) -#define __NR_SVR4_MIPS_reserved9 (__NR_SVR4 + 217) -#define __NR_SVR4_MIPS_reserved10 (__NR_SVR4 + 218) -#define __NR_SVR4_MIPS_reserved11 (__NR_SVR4 + 219) -#define __NR_SVR4_MIPS_reserved12 (__NR_SVR4 + 220) -#define __NR_SVR4_CDC_reserved1 (__NR_SVR4 + 221) -#define __NR_SVR4_CDC_reserved2 (__NR_SVR4 + 222) -#define __NR_SVR4_CDC_reserved3 (__NR_SVR4 + 223) -#define __NR_SVR4_CDC_reserved4 (__NR_SVR4 + 224) -#define 
__NR_SVR4_CDC_reserved5 (__NR_SVR4 + 225) -#define __NR_SVR4_CDC_reserved6 (__NR_SVR4 + 226) -#define __NR_SVR4_CDC_reserved7 (__NR_SVR4 + 227) -#define __NR_SVR4_CDC_reserved8 (__NR_SVR4 + 228) -#define __NR_SVR4_CDC_reserved9 (__NR_SVR4 + 229) -#define __NR_SVR4_CDC_reserved10 (__NR_SVR4 + 230) -#define __NR_SVR4_CDC_reserved11 (__NR_SVR4 + 231) -#define __NR_SVR4_CDC_reserved12 (__NR_SVR4 + 232) -#define __NR_SVR4_CDC_reserved13 (__NR_SVR4 + 233) -#define __NR_SVR4_CDC_reserved14 (__NR_SVR4 + 234) -#define __NR_SVR4_CDC_reserved15 (__NR_SVR4 + 235) -#define __NR_SVR4_CDC_reserved16 (__NR_SVR4 + 236) -#define __NR_SVR4_CDC_reserved17 (__NR_SVR4 + 237) -#define __NR_SVR4_CDC_reserved18 (__NR_SVR4 + 238) -#define __NR_SVR4_CDC_reserved19 (__NR_SVR4 + 239) -#define __NR_SVR4_CDC_reserved20 (__NR_SVR4 + 240) - -/* - * SYS V syscalls are in the range from 1000 to 1999 - */ -#define __NR_SYSV 1000 -#define __NR_SYSV_syscall (__NR_SYSV + 0) -#define __NR_SYSV_exit (__NR_SYSV + 1) -#define __NR_SYSV_fork (__NR_SYSV + 2) -#define __NR_SYSV_read (__NR_SYSV + 3) -#define __NR_SYSV_write (__NR_SYSV + 4) -#define __NR_SYSV_open (__NR_SYSV + 5) -#define __NR_SYSV_close (__NR_SYSV + 6) -#define __NR_SYSV_wait (__NR_SYSV + 7) -#define __NR_SYSV_creat (__NR_SYSV + 8) -#define __NR_SYSV_link (__NR_SYSV + 9) -#define __NR_SYSV_unlink (__NR_SYSV + 10) -#define __NR_SYSV_execv (__NR_SYSV + 11) -#define __NR_SYSV_chdir (__NR_SYSV + 12) -#define __NR_SYSV_time (__NR_SYSV + 13) -#define __NR_SYSV_mknod (__NR_SYSV + 14) -#define __NR_SYSV_chmod (__NR_SYSV + 15) -#define __NR_SYSV_chown (__NR_SYSV + 16) -#define __NR_SYSV_brk (__NR_SYSV + 17) -#define __NR_SYSV_stat (__NR_SYSV + 18) -#define __NR_SYSV_lseek (__NR_SYSV + 19) -#define __NR_SYSV_getpid (__NR_SYSV + 20) -#define __NR_SYSV_mount (__NR_SYSV + 21) -#define __NR_SYSV_umount (__NR_SYSV + 22) -#define __NR_SYSV_setuid (__NR_SYSV + 23) -#define __NR_SYSV_getuid (__NR_SYSV + 24) -#define __NR_SYSV_stime (__NR_SYSV + 25) -#define __NR_SYSV_ptrace (__NR_SYSV + 26) -#define __NR_SYSV_alarm (__NR_SYSV + 27) -#define __NR_SYSV_fstat (__NR_SYSV + 28) -#define __NR_SYSV_pause (__NR_SYSV + 29) -#define __NR_SYSV_utime (__NR_SYSV + 30) -#define __NR_SYSV_stty (__NR_SYSV + 31) -#define __NR_SYSV_gtty (__NR_SYSV + 32) -#define __NR_SYSV_access (__NR_SYSV + 33) -#define __NR_SYSV_nice (__NR_SYSV + 34) -#define __NR_SYSV_statfs (__NR_SYSV + 35) -#define __NR_SYSV_sync (__NR_SYSV + 36) -#define __NR_SYSV_kill (__NR_SYSV + 37) -#define __NR_SYSV_fstatfs (__NR_SYSV + 38) -#define __NR_SYSV_setpgrp (__NR_SYSV + 39) -#define __NR_SYSV_syssgi (__NR_SYSV + 40) -#define __NR_SYSV_dup (__NR_SYSV + 41) -#define __NR_SYSV_pipe (__NR_SYSV + 42) -#define __NR_SYSV_times (__NR_SYSV + 43) -#define __NR_SYSV_profil (__NR_SYSV + 44) -#define __NR_SYSV_plock (__NR_SYSV + 45) -#define __NR_SYSV_setgid (__NR_SYSV + 46) -#define __NR_SYSV_getgid (__NR_SYSV + 47) -#define __NR_SYSV_sig (__NR_SYSV + 48) -#define __NR_SYSV_msgsys (__NR_SYSV + 49) -#define __NR_SYSV_sysmips (__NR_SYSV + 50) -#define __NR_SYSV_acct (__NR_SYSV + 51) -#define __NR_SYSV_shmsys (__NR_SYSV + 52) -#define __NR_SYSV_semsys (__NR_SYSV + 53) -#define __NR_SYSV_ioctl (__NR_SYSV + 54) -#define __NR_SYSV_uadmin (__NR_SYSV + 55) -#define __NR_SYSV_sysmp (__NR_SYSV + 56) -#define __NR_SYSV_utssys (__NR_SYSV + 57) -#define __NR_SYSV_USG_reserved1 (__NR_SYSV + 58) -#define __NR_SYSV_execve (__NR_SYSV + 59) -#define __NR_SYSV_umask (__NR_SYSV + 60) -#define __NR_SYSV_chroot (__NR_SYSV + 61) -#define __NR_SYSV_fcntl (__NR_SYSV + 
62) -#define __NR_SYSV_ulimit (__NR_SYSV + 63) -#define __NR_SYSV_SAFARI4_reserved1 (__NR_SYSV + 64) -#define __NR_SYSV_SAFARI4_reserved2 (__NR_SYSV + 65) -#define __NR_SYSV_SAFARI4_reserved3 (__NR_SYSV + 66) -#define __NR_SYSV_SAFARI4_reserved4 (__NR_SYSV + 67) -#define __NR_SYSV_SAFARI4_reserved5 (__NR_SYSV + 68) -#define __NR_SYSV_SAFARI4_reserved6 (__NR_SYSV + 69) -#define __NR_SYSV_advfs (__NR_SYSV + 70) -#define __NR_SYSV_unadvfs (__NR_SYSV + 71) -#define __NR_SYSV_rmount (__NR_SYSV + 72) -#define __NR_SYSV_rumount (__NR_SYSV + 73) -#define __NR_SYSV_rfstart (__NR_SYSV + 74) -#define __NR_SYSV_getrlimit64 (__NR_SYSV + 75) -#define __NR_SYSV_setrlimit64 (__NR_SYSV + 76) -#define __NR_SYSV_nanosleep (__NR_SYSV + 77) -#define __NR_SYSV_lseek64 (__NR_SYSV + 78) -#define __NR_SYSV_rmdir (__NR_SYSV + 79) -#define __NR_SYSV_mkdir (__NR_SYSV + 80) -#define __NR_SYSV_getdents (__NR_SYSV + 81) -#define __NR_SYSV_sginap (__NR_SYSV + 82) -#define __NR_SYSV_sgikopt (__NR_SYSV + 83) -#define __NR_SYSV_sysfs (__NR_SYSV + 84) -#define __NR_SYSV_getmsg (__NR_SYSV + 85) -#define __NR_SYSV_putmsg (__NR_SYSV + 86) -#define __NR_SYSV_poll (__NR_SYSV + 87) -#define __NR_SYSV_sigreturn (__NR_SYSV + 88) -#define __NR_SYSV_accept (__NR_SYSV + 89) -#define __NR_SYSV_bind (__NR_SYSV + 90) -#define __NR_SYSV_connect (__NR_SYSV + 91) -#define __NR_SYSV_gethostid (__NR_SYSV + 92) -#define __NR_SYSV_getpeername (__NR_SYSV + 93) -#define __NR_SYSV_getsockname (__NR_SYSV + 94) -#define __NR_SYSV_getsockopt (__NR_SYSV + 95) -#define __NR_SYSV_listen (__NR_SYSV + 96) -#define __NR_SYSV_recv (__NR_SYSV + 97) -#define __NR_SYSV_recvfrom (__NR_SYSV + 98) -#define __NR_SYSV_recvmsg (__NR_SYSV + 99) -#define __NR_SYSV_select (__NR_SYSV + 100) -#define __NR_SYSV_send (__NR_SYSV + 101) -#define __NR_SYSV_sendmsg (__NR_SYSV + 102) -#define __NR_SYSV_sendto (__NR_SYSV + 103) -#define __NR_SYSV_sethostid (__NR_SYSV + 104) -#define __NR_SYSV_setsockopt (__NR_SYSV + 105) -#define __NR_SYSV_shutdown (__NR_SYSV + 106) -#define __NR_SYSV_socket (__NR_SYSV + 107) -#define __NR_SYSV_gethostname (__NR_SYSV + 108) -#define __NR_SYSV_sethostname (__NR_SYSV + 109) -#define __NR_SYSV_getdomainname (__NR_SYSV + 110) -#define __NR_SYSV_setdomainname (__NR_SYSV + 111) -#define __NR_SYSV_truncate (__NR_SYSV + 112) -#define __NR_SYSV_ftruncate (__NR_SYSV + 113) -#define __NR_SYSV_rename (__NR_SYSV + 114) -#define __NR_SYSV_symlink (__NR_SYSV + 115) -#define __NR_SYSV_readlink (__NR_SYSV + 116) -#define __NR_SYSV_lstat (__NR_SYSV + 117) -#define __NR_SYSV_nfsmount (__NR_SYSV + 118) -#define __NR_SYSV_nfssvc (__NR_SYSV + 119) -#define __NR_SYSV_getfh (__NR_SYSV + 120) -#define __NR_SYSV_async_daemon (__NR_SYSV + 121) -#define __NR_SYSV_exportfs (__NR_SYSV + 122) -#define __NR_SYSV_setregid (__NR_SYSV + 123) -#define __NR_SYSV_setreuid (__NR_SYSV + 124) -#define __NR_SYSV_getitimer (__NR_SYSV + 125) -#define __NR_SYSV_setitimer (__NR_SYSV + 126) -#define __NR_SYSV_adjtime (__NR_SYSV + 127) -#define __NR_SYSV_BSD_getime (__NR_SYSV + 128) -#define __NR_SYSV_sproc (__NR_SYSV + 129) -#define __NR_SYSV_prctl (__NR_SYSV + 130) -#define __NR_SYSV_procblk (__NR_SYSV + 131) -#define __NR_SYSV_sprocsp (__NR_SYSV + 132) -#define __NR_SYSV_sgigsc (__NR_SYSV + 133) -#define __NR_SYSV_mmap (__NR_SYSV + 134) -#define __NR_SYSV_munmap (__NR_SYSV + 135) -#define __NR_SYSV_mprotect (__NR_SYSV + 136) -#define __NR_SYSV_msync (__NR_SYSV + 137) -#define __NR_SYSV_madvise (__NR_SYSV + 138) -#define __NR_SYSV_pagelock (__NR_SYSV + 139) -#define __NR_SYSV_getpagesize 
(__NR_SYSV + 140) -#define __NR_SYSV_quotactl (__NR_SYSV + 141) -#define __NR_SYSV_libdetach (__NR_SYSV + 142) -#define __NR_SYSV_BSDgetpgrp (__NR_SYSV + 143) -#define __NR_SYSV_BSDsetpgrp (__NR_SYSV + 144) -#define __NR_SYSV_vhangup (__NR_SYSV + 145) -#define __NR_SYSV_fsync (__NR_SYSV + 146) -#define __NR_SYSV_fchdir (__NR_SYSV + 147) -#define __NR_SYSV_getrlimit (__NR_SYSV + 148) -#define __NR_SYSV_setrlimit (__NR_SYSV + 149) -#define __NR_SYSV_cacheflush (__NR_SYSV + 150) -#define __NR_SYSV_cachectl (__NR_SYSV + 151) -#define __NR_SYSV_fchown (__NR_SYSV + 152) -#define __NR_SYSV_fchmod (__NR_SYSV + 153) -#define __NR_SYSV_wait3 (__NR_SYSV + 154) -#define __NR_SYSV_socketpair (__NR_SYSV + 155) -#define __NR_SYSV_sysinfo (__NR_SYSV + 156) -#define __NR_SYSV_nuname (__NR_SYSV + 157) -#define __NR_SYSV_xstat (__NR_SYSV + 158) -#define __NR_SYSV_lxstat (__NR_SYSV + 159) -#define __NR_SYSV_fxstat (__NR_SYSV + 160) -#define __NR_SYSV_xmknod (__NR_SYSV + 161) -#define __NR_SYSV_ksigaction (__NR_SYSV + 162) -#define __NR_SYSV_sigpending (__NR_SYSV + 163) -#define __NR_SYSV_sigprocmask (__NR_SYSV + 164) -#define __NR_SYSV_sigsuspend (__NR_SYSV + 165) -#define __NR_SYSV_sigpoll (__NR_SYSV + 166) -#define __NR_SYSV_swapctl (__NR_SYSV + 167) -#define __NR_SYSV_getcontext (__NR_SYSV + 168) -#define __NR_SYSV_setcontext (__NR_SYSV + 169) -#define __NR_SYSV_waitsys (__NR_SYSV + 170) -#define __NR_SYSV_sigstack (__NR_SYSV + 171) -#define __NR_SYSV_sigaltstack (__NR_SYSV + 172) -#define __NR_SYSV_sigsendset (__NR_SYSV + 173) -#define __NR_SYSV_statvfs (__NR_SYSV + 174) -#define __NR_SYSV_fstatvfs (__NR_SYSV + 175) -#define __NR_SYSV_getpmsg (__NR_SYSV + 176) -#define __NR_SYSV_putpmsg (__NR_SYSV + 177) -#define __NR_SYSV_lchown (__NR_SYSV + 178) -#define __NR_SYSV_priocntl (__NR_SYSV + 179) -#define __NR_SYSV_ksigqueue (__NR_SYSV + 180) -#define __NR_SYSV_readv (__NR_SYSV + 181) -#define __NR_SYSV_writev (__NR_SYSV + 182) -#define __NR_SYSV_truncate64 (__NR_SYSV + 183) -#define __NR_SYSV_ftruncate64 (__NR_SYSV + 184) -#define __NR_SYSV_mmap64 (__NR_SYSV + 185) -#define __NR_SYSV_dmi (__NR_SYSV + 186) -#define __NR_SYSV_pread (__NR_SYSV + 187) -#define __NR_SYSV_pwrite (__NR_SYSV + 188) - -/* - * BSD 4.3 syscalls are in the range from 2000 to 2999 - */ -#define __NR_BSD43 2000 -#define __NR_BSD43_syscall (__NR_BSD43 + 0) -#define __NR_BSD43_exit (__NR_BSD43 + 1) -#define __NR_BSD43_fork (__NR_BSD43 + 2) -#define __NR_BSD43_read (__NR_BSD43 + 3) -#define __NR_BSD43_write (__NR_BSD43 + 4) -#define __NR_BSD43_open (__NR_BSD43 + 5) -#define __NR_BSD43_close (__NR_BSD43 + 6) -#define __NR_BSD43_wait (__NR_BSD43 + 7) -#define __NR_BSD43_creat (__NR_BSD43 + 8) -#define __NR_BSD43_link (__NR_BSD43 + 9) -#define __NR_BSD43_unlink (__NR_BSD43 + 10) -#define __NR_BSD43_exec (__NR_BSD43 + 11) -#define __NR_BSD43_chdir (__NR_BSD43 + 12) -#define __NR_BSD43_time (__NR_BSD43 + 13) -#define __NR_BSD43_mknod (__NR_BSD43 + 14) -#define __NR_BSD43_chmod (__NR_BSD43 + 15) -#define __NR_BSD43_chown (__NR_BSD43 + 16) -#define __NR_BSD43_sbreak (__NR_BSD43 + 17) -#define __NR_BSD43_oldstat (__NR_BSD43 + 18) -#define __NR_BSD43_lseek (__NR_BSD43 + 19) -#define __NR_BSD43_getpid (__NR_BSD43 + 20) -#define __NR_BSD43_oldmount (__NR_BSD43 + 21) -#define __NR_BSD43_umount (__NR_BSD43 + 22) -#define __NR_BSD43_setuid (__NR_BSD43 + 23) -#define __NR_BSD43_getuid (__NR_BSD43 + 24) -#define __NR_BSD43_stime (__NR_BSD43 + 25) -#define __NR_BSD43_ptrace (__NR_BSD43 + 26) -#define __NR_BSD43_alarm (__NR_BSD43 + 27) -#define 
__NR_BSD43_oldfstat (__NR_BSD43 + 28) -#define __NR_BSD43_pause (__NR_BSD43 + 29) -#define __NR_BSD43_utime (__NR_BSD43 + 30) -#define __NR_BSD43_stty (__NR_BSD43 + 31) -#define __NR_BSD43_gtty (__NR_BSD43 + 32) -#define __NR_BSD43_access (__NR_BSD43 + 33) -#define __NR_BSD43_nice (__NR_BSD43 + 34) -#define __NR_BSD43_ftime (__NR_BSD43 + 35) -#define __NR_BSD43_sync (__NR_BSD43 + 36) -#define __NR_BSD43_kill (__NR_BSD43 + 37) -#define __NR_BSD43_stat (__NR_BSD43 + 38) -#define __NR_BSD43_oldsetpgrp (__NR_BSD43 + 39) -#define __NR_BSD43_lstat (__NR_BSD43 + 40) -#define __NR_BSD43_dup (__NR_BSD43 + 41) -#define __NR_BSD43_pipe (__NR_BSD43 + 42) -#define __NR_BSD43_times (__NR_BSD43 + 43) -#define __NR_BSD43_profil (__NR_BSD43 + 44) -#define __NR_BSD43_msgsys (__NR_BSD43 + 45) -#define __NR_BSD43_setgid (__NR_BSD43 + 46) -#define __NR_BSD43_getgid (__NR_BSD43 + 47) -#define __NR_BSD43_ssig (__NR_BSD43 + 48) -#define __NR_BSD43_reserved1 (__NR_BSD43 + 49) -#define __NR_BSD43_reserved2 (__NR_BSD43 + 50) -#define __NR_BSD43_sysacct (__NR_BSD43 + 51) -#define __NR_BSD43_phys (__NR_BSD43 + 52) -#define __NR_BSD43_lock (__NR_BSD43 + 53) -#define __NR_BSD43_ioctl (__NR_BSD43 + 54) -#define __NR_BSD43_reboot (__NR_BSD43 + 55) -#define __NR_BSD43_mpxchan (__NR_BSD43 + 56) -#define __NR_BSD43_symlink (__NR_BSD43 + 57) -#define __NR_BSD43_readlink (__NR_BSD43 + 58) -#define __NR_BSD43_execve (__NR_BSD43 + 59) -#define __NR_BSD43_umask (__NR_BSD43 + 60) -#define __NR_BSD43_chroot (__NR_BSD43 + 61) -#define __NR_BSD43_fstat (__NR_BSD43 + 62) -#define __NR_BSD43_reserved3 (__NR_BSD43 + 63) -#define __NR_BSD43_getpagesize (__NR_BSD43 + 64) -#define __NR_BSD43_mremap (__NR_BSD43 + 65) -#define __NR_BSD43_vfork (__NR_BSD43 + 66) -#define __NR_BSD43_vread (__NR_BSD43 + 67) -#define __NR_BSD43_vwrite (__NR_BSD43 + 68) -#define __NR_BSD43_sbrk (__NR_BSD43 + 69) -#define __NR_BSD43_sstk (__NR_BSD43 + 70) -#define __NR_BSD43_mmap (__NR_BSD43 + 71) -#define __NR_BSD43_vadvise (__NR_BSD43 + 72) -#define __NR_BSD43_munmap (__NR_BSD43 + 73) -#define __NR_BSD43_mprotect (__NR_BSD43 + 74) -#define __NR_BSD43_madvise (__NR_BSD43 + 75) -#define __NR_BSD43_vhangup (__NR_BSD43 + 76) -#define __NR_BSD43_vlimit (__NR_BSD43 + 77) -#define __NR_BSD43_mincore (__NR_BSD43 + 78) -#define __NR_BSD43_getgroups (__NR_BSD43 + 79) -#define __NR_BSD43_setgroups (__NR_BSD43 + 80) -#define __NR_BSD43_getpgrp (__NR_BSD43 + 81) -#define __NR_BSD43_setpgrp (__NR_BSD43 + 82) -#define __NR_BSD43_setitimer (__NR_BSD43 + 83) -#define __NR_BSD43_wait3 (__NR_BSD43 + 84) -#define __NR_BSD43_swapon (__NR_BSD43 + 85) -#define __NR_BSD43_getitimer (__NR_BSD43 + 86) -#define __NR_BSD43_gethostname (__NR_BSD43 + 87) -#define __NR_BSD43_sethostname (__NR_BSD43 + 88) -#define __NR_BSD43_getdtablesize (__NR_BSD43 + 89) -#define __NR_BSD43_dup2 (__NR_BSD43 + 90) -#define __NR_BSD43_getdopt (__NR_BSD43 + 91) -#define __NR_BSD43_fcntl (__NR_BSD43 + 92) -#define __NR_BSD43_select (__NR_BSD43 + 93) -#define __NR_BSD43_setdopt (__NR_BSD43 + 94) -#define __NR_BSD43_fsync (__NR_BSD43 + 95) -#define __NR_BSD43_setpriority (__NR_BSD43 + 96) -#define __NR_BSD43_socket (__NR_BSD43 + 97) -#define __NR_BSD43_connect (__NR_BSD43 + 98) -#define __NR_BSD43_oldaccept (__NR_BSD43 + 99) -#define __NR_BSD43_getpriority (__NR_BSD43 + 100) -#define __NR_BSD43_send (__NR_BSD43 + 101) -#define __NR_BSD43_recv (__NR_BSD43 + 102) -#define __NR_BSD43_sigreturn (__NR_BSD43 + 103) -#define __NR_BSD43_bind (__NR_BSD43 + 104) -#define __NR_BSD43_setsockopt (__NR_BSD43 + 105) -#define 
__NR_BSD43_listen (__NR_BSD43 + 106) -#define __NR_BSD43_vtimes (__NR_BSD43 + 107) -#define __NR_BSD43_sigvec (__NR_BSD43 + 108) -#define __NR_BSD43_sigblock (__NR_BSD43 + 109) -#define __NR_BSD43_sigsetmask (__NR_BSD43 + 110) -#define __NR_BSD43_sigpause (__NR_BSD43 + 111) -#define __NR_BSD43_sigstack (__NR_BSD43 + 112) -#define __NR_BSD43_oldrecvmsg (__NR_BSD43 + 113) -#define __NR_BSD43_oldsendmsg (__NR_BSD43 + 114) -#define __NR_BSD43_vtrace (__NR_BSD43 + 115) -#define __NR_BSD43_gettimeofday (__NR_BSD43 + 116) -#define __NR_BSD43_getrusage (__NR_BSD43 + 117) -#define __NR_BSD43_getsockopt (__NR_BSD43 + 118) -#define __NR_BSD43_reserved4 (__NR_BSD43 + 119) -#define __NR_BSD43_readv (__NR_BSD43 + 120) -#define __NR_BSD43_writev (__NR_BSD43 + 121) -#define __NR_BSD43_settimeofday (__NR_BSD43 + 122) -#define __NR_BSD43_fchown (__NR_BSD43 + 123) -#define __NR_BSD43_fchmod (__NR_BSD43 + 124) -#define __NR_BSD43_oldrecvfrom (__NR_BSD43 + 125) -#define __NR_BSD43_setreuid (__NR_BSD43 + 126) -#define __NR_BSD43_setregid (__NR_BSD43 + 127) -#define __NR_BSD43_rename (__NR_BSD43 + 128) -#define __NR_BSD43_truncate (__NR_BSD43 + 129) -#define __NR_BSD43_ftruncate (__NR_BSD43 + 130) -#define __NR_BSD43_flock (__NR_BSD43 + 131) -#define __NR_BSD43_semsys (__NR_BSD43 + 132) -#define __NR_BSD43_sendto (__NR_BSD43 + 133) -#define __NR_BSD43_shutdown (__NR_BSD43 + 134) -#define __NR_BSD43_socketpair (__NR_BSD43 + 135) -#define __NR_BSD43_mkdir (__NR_BSD43 + 136) -#define __NR_BSD43_rmdir (__NR_BSD43 + 137) -#define __NR_BSD43_utimes (__NR_BSD43 + 138) -#define __NR_BSD43_sigcleanup (__NR_BSD43 + 139) -#define __NR_BSD43_adjtime (__NR_BSD43 + 140) -#define __NR_BSD43_oldgetpeername (__NR_BSD43 + 141) -#define __NR_BSD43_gethostid (__NR_BSD43 + 142) -#define __NR_BSD43_sethostid (__NR_BSD43 + 143) -#define __NR_BSD43_getrlimit (__NR_BSD43 + 144) -#define __NR_BSD43_setrlimit (__NR_BSD43 + 145) -#define __NR_BSD43_killpg (__NR_BSD43 + 146) -#define __NR_BSD43_shmsys (__NR_BSD43 + 147) -#define __NR_BSD43_quota (__NR_BSD43 + 148) -#define __NR_BSD43_qquota (__NR_BSD43 + 149) -#define __NR_BSD43_oldgetsockname (__NR_BSD43 + 150) -#define __NR_BSD43_sysmips (__NR_BSD43 + 151) -#define __NR_BSD43_cacheflush (__NR_BSD43 + 152) -#define __NR_BSD43_cachectl (__NR_BSD43 + 153) -#define __NR_BSD43_debug (__NR_BSD43 + 154) -#define __NR_BSD43_reserved5 (__NR_BSD43 + 155) -#define __NR_BSD43_reserved6 (__NR_BSD43 + 156) -#define __NR_BSD43_nfs_mount (__NR_BSD43 + 157) -#define __NR_BSD43_nfs_svc (__NR_BSD43 + 158) -#define __NR_BSD43_getdirentries (__NR_BSD43 + 159) -#define __NR_BSD43_statfs (__NR_BSD43 + 160) -#define __NR_BSD43_fstatfs (__NR_BSD43 + 161) -#define __NR_BSD43_unmount (__NR_BSD43 + 162) -#define __NR_BSD43_async_daemon (__NR_BSD43 + 163) -#define __NR_BSD43_nfs_getfh (__NR_BSD43 + 164) -#define __NR_BSD43_getdomainname (__NR_BSD43 + 165) -#define __NR_BSD43_setdomainname (__NR_BSD43 + 166) -#define __NR_BSD43_pcfs_mount (__NR_BSD43 + 167) -#define __NR_BSD43_quotactl (__NR_BSD43 + 168) -#define __NR_BSD43_oldexportfs (__NR_BSD43 + 169) -#define __NR_BSD43_smount (__NR_BSD43 + 170) -#define __NR_BSD43_mipshwconf (__NR_BSD43 + 171) -#define __NR_BSD43_exportfs (__NR_BSD43 + 172) -#define __NR_BSD43_nfsfh_open (__NR_BSD43 + 173) -#define __NR_BSD43_libattach (__NR_BSD43 + 174) -#define __NR_BSD43_libdetach (__NR_BSD43 + 175) -#define __NR_BSD43_accept (__NR_BSD43 + 176) -#define __NR_BSD43_reserved7 (__NR_BSD43 + 177) -#define __NR_BSD43_reserved8 (__NR_BSD43 + 178) -#define __NR_BSD43_recvmsg 
(__NR_BSD43 + 179) -#define __NR_BSD43_recvfrom (__NR_BSD43 + 180) -#define __NR_BSD43_sendmsg (__NR_BSD43 + 181) -#define __NR_BSD43_getpeername (__NR_BSD43 + 182) -#define __NR_BSD43_getsockname (__NR_BSD43 + 183) -#define __NR_BSD43_aread (__NR_BSD43 + 184) -#define __NR_BSD43_awrite (__NR_BSD43 + 185) -#define __NR_BSD43_listio (__NR_BSD43 + 186) -#define __NR_BSD43_acancel (__NR_BSD43 + 187) -#define __NR_BSD43_astatus (__NR_BSD43 + 188) -#define __NR_BSD43_await (__NR_BSD43 + 189) -#define __NR_BSD43_areadv (__NR_BSD43 + 190) -#define __NR_BSD43_awritev (__NR_BSD43 + 191) - -/* - * POSIX syscalls are in the range from 3000 to 3999 - */ -#define __NR_POSIX 3000 -#define __NR_POSIX_syscall (__NR_POSIX + 0) -#define __NR_POSIX_exit (__NR_POSIX + 1) -#define __NR_POSIX_fork (__NR_POSIX + 2) -#define __NR_POSIX_read (__NR_POSIX + 3) -#define __NR_POSIX_write (__NR_POSIX + 4) -#define __NR_POSIX_open (__NR_POSIX + 5) -#define __NR_POSIX_close (__NR_POSIX + 6) -#define __NR_POSIX_wait (__NR_POSIX + 7) -#define __NR_POSIX_creat (__NR_POSIX + 8) -#define __NR_POSIX_link (__NR_POSIX + 9) -#define __NR_POSIX_unlink (__NR_POSIX + 10) -#define __NR_POSIX_exec (__NR_POSIX + 11) -#define __NR_POSIX_chdir (__NR_POSIX + 12) -#define __NR_POSIX_gtime (__NR_POSIX + 13) -#define __NR_POSIX_mknod (__NR_POSIX + 14) -#define __NR_POSIX_chmod (__NR_POSIX + 15) -#define __NR_POSIX_chown (__NR_POSIX + 16) -#define __NR_POSIX_sbreak (__NR_POSIX + 17) -#define __NR_POSIX_stat (__NR_POSIX + 18) -#define __NR_POSIX_lseek (__NR_POSIX + 19) -#define __NR_POSIX_getpid (__NR_POSIX + 20) -#define __NR_POSIX_mount (__NR_POSIX + 21) -#define __NR_POSIX_umount (__NR_POSIX + 22) -#define __NR_POSIX_setuid (__NR_POSIX + 23) -#define __NR_POSIX_getuid (__NR_POSIX + 24) -#define __NR_POSIX_stime (__NR_POSIX + 25) -#define __NR_POSIX_ptrace (__NR_POSIX + 26) -#define __NR_POSIX_alarm (__NR_POSIX + 27) -#define __NR_POSIX_fstat (__NR_POSIX + 28) -#define __NR_POSIX_pause (__NR_POSIX + 29) -#define __NR_POSIX_utime (__NR_POSIX + 30) -#define __NR_POSIX_stty (__NR_POSIX + 31) -#define __NR_POSIX_gtty (__NR_POSIX + 32) -#define __NR_POSIX_access (__NR_POSIX + 33) -#define __NR_POSIX_nice (__NR_POSIX + 34) -#define __NR_POSIX_statfs (__NR_POSIX + 35) -#define __NR_POSIX_sync (__NR_POSIX + 36) -#define __NR_POSIX_kill (__NR_POSIX + 37) -#define __NR_POSIX_fstatfs (__NR_POSIX + 38) -#define __NR_POSIX_getpgrp (__NR_POSIX + 39) -#define __NR_POSIX_syssgi (__NR_POSIX + 40) -#define __NR_POSIX_dup (__NR_POSIX + 41) -#define __NR_POSIX_pipe (__NR_POSIX + 42) -#define __NR_POSIX_times (__NR_POSIX + 43) -#define __NR_POSIX_profil (__NR_POSIX + 44) -#define __NR_POSIX_lock (__NR_POSIX + 45) -#define __NR_POSIX_setgid (__NR_POSIX + 46) -#define __NR_POSIX_getgid (__NR_POSIX + 47) -#define __NR_POSIX_sig (__NR_POSIX + 48) -#define __NR_POSIX_msgsys (__NR_POSIX + 49) -#define __NR_POSIX_sysmips (__NR_POSIX + 50) -#define __NR_POSIX_sysacct (__NR_POSIX + 51) -#define __NR_POSIX_shmsys (__NR_POSIX + 52) -#define __NR_POSIX_semsys (__NR_POSIX + 53) -#define __NR_POSIX_ioctl (__NR_POSIX + 54) -#define __NR_POSIX_uadmin (__NR_POSIX + 55) -#define __NR_POSIX_exch (__NR_POSIX + 56) -#define __NR_POSIX_utssys (__NR_POSIX + 57) -#define __NR_POSIX_USG_reserved1 (__NR_POSIX + 58) -#define __NR_POSIX_exece (__NR_POSIX + 59) -#define __NR_POSIX_umask (__NR_POSIX + 60) -#define __NR_POSIX_chroot (__NR_POSIX + 61) -#define __NR_POSIX_fcntl (__NR_POSIX + 62) -#define __NR_POSIX_ulimit (__NR_POSIX + 63) -#define __NR_POSIX_SAFARI4_reserved1 (__NR_POSIX + 64) 
-#define __NR_POSIX_SAFARI4_reserved2 (__NR_POSIX + 65) -#define __NR_POSIX_SAFARI4_reserved3 (__NR_POSIX + 66) -#define __NR_POSIX_SAFARI4_reserved4 (__NR_POSIX + 67) -#define __NR_POSIX_SAFARI4_reserved5 (__NR_POSIX + 68) -#define __NR_POSIX_SAFARI4_reserved6 (__NR_POSIX + 69) -#define __NR_POSIX_advfs (__NR_POSIX + 70) -#define __NR_POSIX_unadvfs (__NR_POSIX + 71) -#define __NR_POSIX_rmount (__NR_POSIX + 72) -#define __NR_POSIX_rumount (__NR_POSIX + 73) -#define __NR_POSIX_rfstart (__NR_POSIX + 74) -#define __NR_POSIX_reserved1 (__NR_POSIX + 75) -#define __NR_POSIX_rdebug (__NR_POSIX + 76) -#define __NR_POSIX_rfstop (__NR_POSIX + 77) -#define __NR_POSIX_rfsys (__NR_POSIX + 78) -#define __NR_POSIX_rmdir (__NR_POSIX + 79) -#define __NR_POSIX_mkdir (__NR_POSIX + 80) -#define __NR_POSIX_getdents (__NR_POSIX + 81) -#define __NR_POSIX_sginap (__NR_POSIX + 82) -#define __NR_POSIX_sgikopt (__NR_POSIX + 83) -#define __NR_POSIX_sysfs (__NR_POSIX + 84) -#define __NR_POSIX_getmsg (__NR_POSIX + 85) -#define __NR_POSIX_putmsg (__NR_POSIX + 86) -#define __NR_POSIX_poll (__NR_POSIX + 87) -#define __NR_POSIX_sigreturn (__NR_POSIX + 88) -#define __NR_POSIX_accept (__NR_POSIX + 89) -#define __NR_POSIX_bind (__NR_POSIX + 90) -#define __NR_POSIX_connect (__NR_POSIX + 91) -#define __NR_POSIX_gethostid (__NR_POSIX + 92) -#define __NR_POSIX_getpeername (__NR_POSIX + 93) -#define __NR_POSIX_getsockname (__NR_POSIX + 94) -#define __NR_POSIX_getsockopt (__NR_POSIX + 95) -#define __NR_POSIX_listen (__NR_POSIX + 96) -#define __NR_POSIX_recv (__NR_POSIX + 97) -#define __NR_POSIX_recvfrom (__NR_POSIX + 98) -#define __NR_POSIX_recvmsg (__NR_POSIX + 99) -#define __NR_POSIX_select (__NR_POSIX + 100) -#define __NR_POSIX_send (__NR_POSIX + 101) -#define __NR_POSIX_sendmsg (__NR_POSIX + 102) -#define __NR_POSIX_sendto (__NR_POSIX + 103) -#define __NR_POSIX_sethostid (__NR_POSIX + 104) -#define __NR_POSIX_setsockopt (__NR_POSIX + 105) -#define __NR_POSIX_shutdown (__NR_POSIX + 106) -#define __NR_POSIX_socket (__NR_POSIX + 107) -#define __NR_POSIX_gethostname (__NR_POSIX + 108) -#define __NR_POSIX_sethostname (__NR_POSIX + 109) -#define __NR_POSIX_getdomainname (__NR_POSIX + 110) -#define __NR_POSIX_setdomainname (__NR_POSIX + 111) -#define __NR_POSIX_truncate (__NR_POSIX + 112) -#define __NR_POSIX_ftruncate (__NR_POSIX + 113) -#define __NR_POSIX_rename (__NR_POSIX + 114) -#define __NR_POSIX_symlink (__NR_POSIX + 115) -#define __NR_POSIX_readlink (__NR_POSIX + 116) -#define __NR_POSIX_lstat (__NR_POSIX + 117) -#define __NR_POSIX_nfs_mount (__NR_POSIX + 118) -#define __NR_POSIX_nfs_svc (__NR_POSIX + 119) -#define __NR_POSIX_nfs_getfh (__NR_POSIX + 120) -#define __NR_POSIX_async_daemon (__NR_POSIX + 121) -#define __NR_POSIX_exportfs (__NR_POSIX + 122) -#define __NR_POSIX_SGI_setregid (__NR_POSIX + 123) -#define __NR_POSIX_SGI_setreuid (__NR_POSIX + 124) -#define __NR_POSIX_getitimer (__NR_POSIX + 125) -#define __NR_POSIX_setitimer (__NR_POSIX + 126) -#define __NR_POSIX_adjtime (__NR_POSIX + 127) -#define __NR_POSIX_SGI_bsdgettime (__NR_POSIX + 128) -#define __NR_POSIX_SGI_sproc (__NR_POSIX + 129) -#define __NR_POSIX_SGI_prctl (__NR_POSIX + 130) -#define __NR_POSIX_SGI_blkproc (__NR_POSIX + 131) -#define __NR_POSIX_SGI_reserved1 (__NR_POSIX + 132) -#define __NR_POSIX_SGI_sgigsc (__NR_POSIX + 133) -#define __NR_POSIX_SGI_mmap (__NR_POSIX + 134) -#define __NR_POSIX_SGI_munmap (__NR_POSIX + 135) -#define __NR_POSIX_SGI_mprotect (__NR_POSIX + 136) -#define __NR_POSIX_SGI_msync (__NR_POSIX + 137) -#define __NR_POSIX_SGI_madvise 
(__NR_POSIX + 138) -#define __NR_POSIX_SGI_mpin (__NR_POSIX + 139) -#define __NR_POSIX_SGI_getpagesize (__NR_POSIX + 140) -#define __NR_POSIX_SGI_libattach (__NR_POSIX + 141) -#define __NR_POSIX_SGI_libdetach (__NR_POSIX + 142) -#define __NR_POSIX_SGI_getpgrp (__NR_POSIX + 143) -#define __NR_POSIX_SGI_setpgrp (__NR_POSIX + 144) -#define __NR_POSIX_SGI_reserved2 (__NR_POSIX + 145) -#define __NR_POSIX_SGI_reserved3 (__NR_POSIX + 146) -#define __NR_POSIX_SGI_reserved4 (__NR_POSIX + 147) -#define __NR_POSIX_SGI_reserved5 (__NR_POSIX + 148) -#define __NR_POSIX_SGI_reserved6 (__NR_POSIX + 149) -#define __NR_POSIX_cacheflush (__NR_POSIX + 150) -#define __NR_POSIX_cachectl (__NR_POSIX + 151) -#define __NR_POSIX_fchown (__NR_POSIX + 152) -#define __NR_POSIX_fchmod (__NR_POSIX + 153) -#define __NR_POSIX_wait3 (__NR_POSIX + 154) -#define __NR_POSIX_mmap (__NR_POSIX + 155) -#define __NR_POSIX_munmap (__NR_POSIX + 156) -#define __NR_POSIX_madvise (__NR_POSIX + 157) -#define __NR_POSIX_BSD_getpagesize (__NR_POSIX + 158) -#define __NR_POSIX_setreuid (__NR_POSIX + 159) -#define __NR_POSIX_setregid (__NR_POSIX + 160) -#define __NR_POSIX_setpgid (__NR_POSIX + 161) -#define __NR_POSIX_getgroups (__NR_POSIX + 162) -#define __NR_POSIX_setgroups (__NR_POSIX + 163) -#define __NR_POSIX_gettimeofday (__NR_POSIX + 164) -#define __NR_POSIX_getrusage (__NR_POSIX + 165) -#define __NR_POSIX_getrlimit (__NR_POSIX + 166) -#define __NR_POSIX_setrlimit (__NR_POSIX + 167) -#define __NR_POSIX_waitpid (__NR_POSIX + 168) -#define __NR_POSIX_dup2 (__NR_POSIX + 169) -#define __NR_POSIX_reserved2 (__NR_POSIX + 170) -#define __NR_POSIX_reserved3 (__NR_POSIX + 171) -#define __NR_POSIX_reserved4 (__NR_POSIX + 172) -#define __NR_POSIX_reserved5 (__NR_POSIX + 173) -#define __NR_POSIX_reserved6 (__NR_POSIX + 174) -#define __NR_POSIX_reserved7 (__NR_POSIX + 175) -#define __NR_POSIX_reserved8 (__NR_POSIX + 176) -#define __NR_POSIX_reserved9 (__NR_POSIX + 177) -#define __NR_POSIX_reserved10 (__NR_POSIX + 178) -#define __NR_POSIX_reserved11 (__NR_POSIX + 179) -#define __NR_POSIX_reserved12 (__NR_POSIX + 180) -#define __NR_POSIX_reserved13 (__NR_POSIX + 181) -#define __NR_POSIX_reserved14 (__NR_POSIX + 182) -#define __NR_POSIX_reserved15 (__NR_POSIX + 183) -#define __NR_POSIX_reserved16 (__NR_POSIX + 184) -#define __NR_POSIX_reserved17 (__NR_POSIX + 185) -#define __NR_POSIX_reserved18 (__NR_POSIX + 186) -#define __NR_POSIX_reserved19 (__NR_POSIX + 187) -#define __NR_POSIX_reserved20 (__NR_POSIX + 188) -#define __NR_POSIX_reserved21 (__NR_POSIX + 189) -#define __NR_POSIX_reserved22 (__NR_POSIX + 190) -#define __NR_POSIX_reserved23 (__NR_POSIX + 191) -#define __NR_POSIX_reserved24 (__NR_POSIX + 192) -#define __NR_POSIX_reserved25 (__NR_POSIX + 193) -#define __NR_POSIX_reserved26 (__NR_POSIX + 194) -#define __NR_POSIX_reserved27 (__NR_POSIX + 195) -#define __NR_POSIX_reserved28 (__NR_POSIX + 196) -#define __NR_POSIX_reserved29 (__NR_POSIX + 197) -#define __NR_POSIX_reserved30 (__NR_POSIX + 198) -#define __NR_POSIX_reserved31 (__NR_POSIX + 199) -#define __NR_POSIX_reserved32 (__NR_POSIX + 200) -#define __NR_POSIX_reserved33 (__NR_POSIX + 201) -#define __NR_POSIX_reserved34 (__NR_POSIX + 202) -#define __NR_POSIX_reserved35 (__NR_POSIX + 203) -#define __NR_POSIX_reserved36 (__NR_POSIX + 204) -#define __NR_POSIX_reserved37 (__NR_POSIX + 205) -#define __NR_POSIX_reserved38 (__NR_POSIX + 206) -#define __NR_POSIX_reserved39 (__NR_POSIX + 207) -#define __NR_POSIX_reserved40 (__NR_POSIX + 208) -#define __NR_POSIX_reserved41 (__NR_POSIX + 209) -#define 
__NR_POSIX_reserved42 (__NR_POSIX + 210) -#define __NR_POSIX_reserved43 (__NR_POSIX + 211) -#define __NR_POSIX_reserved44 (__NR_POSIX + 212) -#define __NR_POSIX_reserved45 (__NR_POSIX + 213) -#define __NR_POSIX_reserved46 (__NR_POSIX + 214) -#define __NR_POSIX_reserved47 (__NR_POSIX + 215) -#define __NR_POSIX_reserved48 (__NR_POSIX + 216) -#define __NR_POSIX_reserved49 (__NR_POSIX + 217) -#define __NR_POSIX_reserved50 (__NR_POSIX + 218) -#define __NR_POSIX_reserved51 (__NR_POSIX + 219) -#define __NR_POSIX_reserved52 (__NR_POSIX + 220) -#define __NR_POSIX_reserved53 (__NR_POSIX + 221) -#define __NR_POSIX_reserved54 (__NR_POSIX + 222) -#define __NR_POSIX_reserved55 (__NR_POSIX + 223) -#define __NR_POSIX_reserved56 (__NR_POSIX + 224) -#define __NR_POSIX_reserved57 (__NR_POSIX + 225) -#define __NR_POSIX_reserved58 (__NR_POSIX + 226) -#define __NR_POSIX_reserved59 (__NR_POSIX + 227) -#define __NR_POSIX_reserved60 (__NR_POSIX + 228) -#define __NR_POSIX_reserved61 (__NR_POSIX + 229) -#define __NR_POSIX_reserved62 (__NR_POSIX + 230) -#define __NR_POSIX_reserved63 (__NR_POSIX + 231) -#define __NR_POSIX_reserved64 (__NR_POSIX + 232) -#define __NR_POSIX_reserved65 (__NR_POSIX + 233) -#define __NR_POSIX_reserved66 (__NR_POSIX + 234) -#define __NR_POSIX_reserved67 (__NR_POSIX + 235) -#define __NR_POSIX_reserved68 (__NR_POSIX + 236) -#define __NR_POSIX_reserved69 (__NR_POSIX + 237) -#define __NR_POSIX_reserved70 (__NR_POSIX + 238) -#define __NR_POSIX_reserved71 (__NR_POSIX + 239) -#define __NR_POSIX_reserved72 (__NR_POSIX + 240) -#define __NR_POSIX_reserved73 (__NR_POSIX + 241) -#define __NR_POSIX_reserved74 (__NR_POSIX + 242) -#define __NR_POSIX_reserved75 (__NR_POSIX + 243) -#define __NR_POSIX_reserved76 (__NR_POSIX + 244) -#define __NR_POSIX_reserved77 (__NR_POSIX + 245) -#define __NR_POSIX_reserved78 (__NR_POSIX + 246) -#define __NR_POSIX_reserved79 (__NR_POSIX + 247) -#define __NR_POSIX_reserved80 (__NR_POSIX + 248) -#define __NR_POSIX_reserved81 (__NR_POSIX + 249) -#define __NR_POSIX_reserved82 (__NR_POSIX + 250) -#define __NR_POSIX_reserved83 (__NR_POSIX + 251) -#define __NR_POSIX_reserved84 (__NR_POSIX + 252) -#define __NR_POSIX_reserved85 (__NR_POSIX + 253) -#define __NR_POSIX_reserved86 (__NR_POSIX + 254) -#define __NR_POSIX_reserved87 (__NR_POSIX + 255) -#define __NR_POSIX_reserved88 (__NR_POSIX + 256) -#define __NR_POSIX_reserved89 (__NR_POSIX + 257) -#define __NR_POSIX_reserved90 (__NR_POSIX + 258) -#define __NR_POSIX_reserved91 (__NR_POSIX + 259) -#define __NR_POSIX_netboot (__NR_POSIX + 260) -#define __NR_POSIX_netunboot (__NR_POSIX + 261) -#define __NR_POSIX_rdump (__NR_POSIX + 262) -#define __NR_POSIX_setsid (__NR_POSIX + 263) -#define __NR_POSIX_getmaxsig (__NR_POSIX + 264) -#define __NR_POSIX_sigpending (__NR_POSIX + 265) -#define __NR_POSIX_sigprocmask (__NR_POSIX + 266) -#define __NR_POSIX_sigsuspend (__NR_POSIX + 267) -#define __NR_POSIX_sigaction (__NR_POSIX + 268) -#define __NR_POSIX_MIPS_reserved1 (__NR_POSIX + 269) -#define __NR_POSIX_MIPS_reserved2 (__NR_POSIX + 270) -#define __NR_POSIX_MIPS_reserved3 (__NR_POSIX + 271) -#define __NR_POSIX_MIPS_reserved4 (__NR_POSIX + 272) -#define __NR_POSIX_MIPS_reserved5 (__NR_POSIX + 273) -#define __NR_POSIX_MIPS_reserved6 (__NR_POSIX + 274) -#define __NR_POSIX_MIPS_reserved7 (__NR_POSIX + 275) -#define __NR_POSIX_MIPS_reserved8 (__NR_POSIX + 276) -#define __NR_POSIX_MIPS_reserved9 (__NR_POSIX + 277) -#define __NR_POSIX_MIPS_reserved10 (__NR_POSIX + 278) -#define __NR_POSIX_MIPS_reserved11 (__NR_POSIX + 279) -#define 
__NR_POSIX_TANDEM_reserved1 (__NR_POSIX + 280) -#define __NR_POSIX_TANDEM_reserved2 (__NR_POSIX + 281) -#define __NR_POSIX_TANDEM_reserved3 (__NR_POSIX + 282) -#define __NR_POSIX_TANDEM_reserved4 (__NR_POSIX + 283) -#define __NR_POSIX_TANDEM_reserved5 (__NR_POSIX + 284) -#define __NR_POSIX_TANDEM_reserved6 (__NR_POSIX + 285) -#define __NR_POSIX_TANDEM_reserved7 (__NR_POSIX + 286) -#define __NR_POSIX_TANDEM_reserved8 (__NR_POSIX + 287) -#define __NR_POSIX_TANDEM_reserved9 (__NR_POSIX + 288) -#define __NR_POSIX_TANDEM_reserved10 (__NR_POSIX + 289) -#define __NR_POSIX_TANDEM_reserved11 (__NR_POSIX + 290) -#define __NR_POSIX_TANDEM_reserved12 (__NR_POSIX + 291) -#define __NR_POSIX_TANDEM_reserved13 (__NR_POSIX + 292) -#define __NR_POSIX_TANDEM_reserved14 (__NR_POSIX + 293) -#define __NR_POSIX_TANDEM_reserved15 (__NR_POSIX + 294) -#define __NR_POSIX_TANDEM_reserved16 (__NR_POSIX + 295) -#define __NR_POSIX_TANDEM_reserved17 (__NR_POSIX + 296) -#define __NR_POSIX_TANDEM_reserved18 (__NR_POSIX + 297) -#define __NR_POSIX_TANDEM_reserved19 (__NR_POSIX + 298) -#define __NR_POSIX_TANDEM_reserved20 (__NR_POSIX + 299) -#define __NR_POSIX_SGI_reserved7 (__NR_POSIX + 300) -#define __NR_POSIX_SGI_reserved8 (__NR_POSIX + 301) -#define __NR_POSIX_SGI_reserved9 (__NR_POSIX + 302) -#define __NR_POSIX_SGI_reserved10 (__NR_POSIX + 303) -#define __NR_POSIX_SGI_reserved11 (__NR_POSIX + 304) -#define __NR_POSIX_SGI_reserved12 (__NR_POSIX + 305) -#define __NR_POSIX_SGI_reserved13 (__NR_POSIX + 306) -#define __NR_POSIX_SGI_reserved14 (__NR_POSIX + 307) -#define __NR_POSIX_SGI_reserved15 (__NR_POSIX + 308) -#define __NR_POSIX_SGI_reserved16 (__NR_POSIX + 309) -#define __NR_POSIX_SGI_reserved17 (__NR_POSIX + 310) -#define __NR_POSIX_SGI_reserved18 (__NR_POSIX + 311) -#define __NR_POSIX_SGI_reserved19 (__NR_POSIX + 312) -#define __NR_POSIX_SGI_reserved20 (__NR_POSIX + 313) -#define __NR_POSIX_SGI_reserved21 (__NR_POSIX + 314) -#define __NR_POSIX_SGI_reserved22 (__NR_POSIX + 315) -#define __NR_POSIX_SGI_reserved23 (__NR_POSIX + 316) -#define __NR_POSIX_SGI_reserved24 (__NR_POSIX + 317) -#define __NR_POSIX_SGI_reserved25 (__NR_POSIX + 318) -#define __NR_POSIX_SGI_reserved26 (__NR_POSIX + 319) - -#endif /* _ASM_RISCOS_SYSCALL_H */ diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index 983e9a2..64ebd08 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h @@ -216,4 +216,5 @@ static __inline__ int atomic_read(const atomic_t *v) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#include <asm-generic/atomic.h> #endif diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h index e829607..736b0ab 100644 --- a/include/asm-parisc/mman.h +++ b/include/asm-parisc/mman.h @@ -38,6 +38,7 @@ #define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ #define MADV_VPS_INHERIT 7 /* Inherit parents page size */ +#define MADV_REMOVE 8 /* remove these pages & resources */ /* The range 12-64 is reserved for page size specification. 
*/ #define MADV_4K_PAGES 12 /* Use 4K pages */ diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index ec4b144..ae395a0 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -402,5 +402,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) #endif /* __powerpc64__ */ +#include <asm-generic/atomic.h> #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_ATOMIC_H_ */ diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h index f5e5342..a2e34c2 100644 --- a/include/asm-powerpc/mman.h +++ b/include/asm-powerpc/mman.h @@ -44,6 +44,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-ppc/ibm_ocp.h b/include/asm-ppc/ibm_ocp.h index 9c21de1..ddce616 100644 --- a/include/asm-ppc/ibm_ocp.h +++ b/include/asm-ppc/ibm_ocp.h @@ -63,7 +63,6 @@ struct ocp_func_emac_data { int wol_irq; /* WOL interrupt */ int mdio_idx; /* EMAC idx of MDIO master or -1 */ int tah_idx; /* TAH device index or -1 */ - int jumbo; /* Jumbo frames capable flag */ int phy_mode; /* PHY type or configurable mode */ u8 mac_addr[6]; /* EMAC mac address */ u32 phy_map; /* EMAC phy map */ diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 84ac6e2..df9cf6e 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -27,6 +27,8 @@ #if defined(CONFIG_4xx) #include <asm/ibm4xx.h> +#elif defined(CONFIG_PPC_MPC52xx) +#include <asm/mpc52xx.h> #elif defined(CONFIG_8xx) #include <asm/mpc8xx.h> #elif defined(CONFIG_8260) diff --git a/include/asm-ppc/mpc52xx.h b/include/asm-ppc/mpc52xx.h index e5f80c2..a055e07 100644 --- a/include/asm-ppc/mpc52xx.h +++ b/include/asm-ppc/mpc52xx.h @@ -29,6 +29,17 @@ struct pt_regs; #endif /* __ASSEMBLY__ */ +#ifdef CONFIG_PCI +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET pci_dram_offset +#else +#define _IO_BASE 0 +#define _ISA_MEM_BASE 0 +#define PCI_DRAM_OFFSET 0 +#endif + + /* ======================================================================== */ /* PPC Sys devices definition */ /* ======================================================================== */ @@ -107,7 +118,7 @@ enum ppc_sys_devices { #define MPC52xx_SDMA_IRQ_NUM 17 #define MPC52xx_PERP_IRQ_NUM 23 -#define MPC52xx_CRIT_IRQ_BASE 0 +#define MPC52xx_CRIT_IRQ_BASE 1 #define MPC52xx_MAIN_IRQ_BASE (MPC52xx_CRIT_IRQ_BASE + MPC52xx_CRIT_IRQ_NUM) #define MPC52xx_SDMA_IRQ_BASE (MPC52xx_MAIN_IRQ_BASE + MPC52xx_MAIN_IRQ_NUM) #define MPC52xx_PERP_IRQ_BASE (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM) diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index b3bd4f6..d82aedf 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -5,7 +5,7 @@ * include/asm-s390/atomic.h * * S390 version - * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Denis Joseph Barrow, * Arnd Bergmann (arndb@de.ibm.com) @@ -45,59 +45,57 @@ typedef struct { #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic_add(int i, atomic_t * v) -{ - __CS_LOOP(v, i, "ar"); -} static __inline__ int atomic_add_return(int i, atomic_t * v) { return __CS_LOOP(v, i, 
"ar"); } -static __inline__ int atomic_add_negative(int i, atomic_t * v) -{ - return __CS_LOOP(v, i, "ar") < 0; -} -static __inline__ void atomic_sub(int i, atomic_t * v) -{ - __CS_LOOP(v, i, "sr"); -} +#define atomic_add(_i, _v) atomic_add_return(_i, _v) +#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) +#define atomic_inc(_v) atomic_add_return(1, _v) +#define atomic_inc_return(_v) atomic_add_return(1, _v) +#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) + static __inline__ int atomic_sub_return(int i, atomic_t * v) { return __CS_LOOP(v, i, "sr"); } -static __inline__ void atomic_inc(volatile atomic_t * v) -{ - __CS_LOOP(v, 1, "ar"); -} -static __inline__ int atomic_inc_return(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "ar"); -} +#define atomic_sub(_i, _v) atomic_sub_return(_i, _v) +#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) +#define atomic_dec(_v) atomic_sub_return(1, _v) +#define atomic_dec_return(_v) atomic_sub_return(1, _v) +#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) -static __inline__ int atomic_inc_and_test(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "ar") == 0; -} -static __inline__ void atomic_dec(volatile atomic_t * v) -{ - __CS_LOOP(v, 1, "sr"); -} -static __inline__ int atomic_dec_return(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "sr"); -} -static __inline__ int atomic_dec_and_test(volatile atomic_t * v) -{ - return __CS_LOOP(v, 1, "sr") == 0; -} static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) { __CS_LOOP(v, ~mask, "nr"); } + static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) { __CS_LOOP(v, mask, "or"); } + +static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + __asm__ __volatile__(" cs %0,%3,0(%2)\n" + : "+d" (old), "=m" (v->counter) + : "a" (v), "d" (new), "m" (v->counter) + : "cc", "memory" ); + return old; +} + +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c != u; +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #undef __CS_LOOP #ifdef __s390x__ @@ -123,97 +121,67 @@ typedef struct { #define atomic64_read(v) ((v)->counter) #define atomic64_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic64_add(long long i, atomic64_t * v) -{ - __CSG_LOOP(v, i, "agr"); -} static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v) -{ - return __CSG_LOOP(v, i, "agr") < 0; -} -static __inline__ void atomic64_sub(long long i, atomic64_t * v) -{ - __CSG_LOOP(v, i, "sgr"); -} -static __inline__ void atomic64_inc(volatile atomic64_t * v) -{ - __CSG_LOOP(v, 1, "agr"); -} -static __inline__ long long atomic64_inc_return(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "agr"); -} -static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "agr") == 0; -} -static __inline__ void atomic64_dec(volatile atomic64_t * v) -{ - __CSG_LOOP(v, 1, "sgr"); -} -static __inline__ long long atomic64_dec_return(volatile atomic64_t * v) -{ - return __CSG_LOOP(v, 1, "sgr"); -} -static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v) +#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) +#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) +#define 
atomic64_inc(_v) atomic64_add_return(1, _v) +#define atomic64_inc_return(_v) atomic64_add_return(1, _v) +#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) + +static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) { - return __CSG_LOOP(v, 1, "sgr") == 0; + return __CSG_LOOP(v, i, "sgr"); } +#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) +#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) +#define atomic64_dec(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) + static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) { __CSG_LOOP(v, ~mask, "ngr"); } + static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) { __CSG_LOOP(v, mask, "ogr"); } -#undef __CSG_LOOP -#endif - -/* - returns 0 if expected_oldval==value in *v ( swap was successful ) - returns 1 if unsuccessful. +static __inline__ long long atomic64_cmpxchg(atomic64_t *v, + long long old, long long new) +{ + __asm__ __volatile__(" csg %0,%3,0(%2)\n" + : "+d" (old), "=m" (v->counter) + : "a" (v), "d" (new), "m" (v->counter) + : "cc", "memory" ); + return old; +} - This is non-portable, use bitops or spinlocks instead! -*/ -static __inline__ int -atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v) +static __inline__ int atomic64_add_unless(atomic64_t *v, + long long a, long long u) { - int retval; - - __asm__ __volatile__( - " lr %0,%3\n" - " cs %0,%4,0(%2)\n" - " ipm %0\n" - " srl %0,28\n" - "0:" - : "=&d" (retval), "=m" (v->counter) - : "a" (v), "d" (expected_oldval) , "d" (new_val), - "m" (v->counter) : "cc", "memory" ); - return retval; + long long c, old; + + c = atomic64_read(v); + while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c) + c = old; + return c != u; } -#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter))) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c != (u); \ -}) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#undef __CSG_LOOP +#endif #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#include <asm-generic/atomic.h> #endif /* __KERNEL__ */ #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h index 3eb231a..12456cb 100644 --- a/include/asm-s390/ccwdev.h +++ b/include/asm-s390/ccwdev.h @@ -185,8 +185,5 @@ extern struct ccw_device *ccw_device_probe_console(void); extern int _ccw_device_get_device_number(struct ccw_device *); extern int _ccw_device_get_subchannel_number(struct ccw_device *); -extern struct device *s390_root_dev_register(const char *); -extern void s390_root_dev_unregister(struct device *); - extern void *ccw_device_get_chp_desc(struct ccw_device *, int); #endif /* _S390_CCWDEV_H_ */ diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h index ea86bd1..c8d5409 100644 --- a/include/asm-s390/mman.h +++ b/include/asm-s390/mman.h @@ -43,6 +43,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & 
resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h index 0ddf0a8..7bc15f0 100644 --- a/include/asm-s390/qdio.h +++ b/include/asm-s390/qdio.h @@ -195,12 +195,14 @@ struct qdr { /* * queue information block (QIB) */ -#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80 -#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 +#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80 +#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40 +#define QIB_RFLAGS_ENABLE_QEBSM 0x80 + struct qib { unsigned int qfmt : 8; /* queue format */ unsigned int pfmt : 8; /* impl. dep. parameter format */ - unsigned int res1 : 8; /* reserved */ + unsigned int rflags : 8; /* QEBSM */ unsigned int ac : 8; /* adapter characteristics */ unsigned int res2; /* reserved */ #ifdef QDIO_32_BIT diff --git a/include/asm-s390/s390_rdev.h b/include/asm-s390/s390_rdev.h new file mode 100644 index 0000000..3ad78f2 --- /dev/null +++ b/include/asm-s390/s390_rdev.h @@ -0,0 +1,15 @@ +/* + * include/asm-s390/s390_rdev.h + * + * Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Cornelia Huck <cohuck@de.ibm.com> + * Carsten Otte <cotte@de.ibm.com> + * + * Interface for s390 root device + */ + +#ifndef _S390_RDEV_H_ +#define _S390_RDEV_H_ +extern struct device *s390_root_dev_register(const char *); +extern void s390_root_dev_unregister(struct device *); +#endif /* _S390_RDEV_H_ */ diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h index 10a619d..be104f2 100644 --- a/include/asm-s390/uaccess.h +++ b/include/asm-s390/uaccess.h @@ -61,8 +61,10 @@ #define segment_eq(a,b) ((a).ar4 == (b).ar4) -#define __access_ok(addr,size) (1) - +static inline int __access_ok(const void *addr, unsigned long size) +{ + return 1; +} #define access_ok(type,addr,size) __access_ok(addr,size) /* @@ -206,25 +208,25 @@ extern int __put_user_bad(void) __attribute__((noreturn)); case 1: { \ unsigned char __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x; \ __get_user_asm(__x, ptr, __gu_err); \ - (x) = (__typeof__(*(ptr))) __x; \ + (x) = *(__typeof__(*(ptr)) *) &__x; \ break; \ }; \ default: \ diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index f97d926..2861cdc 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -539,7 +539,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION -# ifdef CONFIG_ARCH_S390_31 +# ifndef CONFIG_64BIT # define __ARCH_WANT_STAT64 # define __ARCH_WANT_SYS_TIME # endif diff --git a/include/asm-s390/vtoc.h b/include/asm-s390/vtoc.h index 41d369f..d1de5b7 100644 --- a/include/asm-s390/vtoc.h +++ b/include/asm-s390/vtoc.h @@ -176,4 +176,28 @@ struct vtoc_format7_label struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */ } __attribute__ ((packed)); +struct vtoc_cms_label { + u8 label_id[4]; /* Label identifier */ + u8 vol_id[6]; /* Volid */ + u16 version_id; /* Version identifier */ + u32 block_size; /* Disk block size */ + u32 origin_ptr; /* Disk origin pointer 
*/ + u32 usable_count; /* Number of usable cylinders/blocks */ + u32 formatted_count; /* Maximum number of formatted cylinders/ + * blocks */ + u32 block_count; /* Disk size in CMS blocks */ + u32 used_count; /* Number of CMS blocks in use */ + u32 fst_size; /* File Status Table (FST) size */ + u32 fst_count; /* Number of FSTs per CMS block */ + u8 format_date[6]; /* Disk FORMAT date */ + u8 reserved1[2]; + u32 disk_offset; /* Disk offset when reserved*/ + u32 map_block; /* Allocation Map Block with next hole */ + u32 hblk_disp; /* Displacement into HBLK data of next hole */ + u32 user_disp; /* Displacement into user part of Allocation + * map */ + u8 reserved2[4]; + u8 segment_name[8]; /* Name of shared segment */ +} __attribute__ ((packed)); + #endif /* _ASM_S390_VTOC_H */ diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index aabfd33..618d8e0 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -140,4 +140,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __ASM_SH_ATOMIC_H */ diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h index 3ebab5f..693bd55 100644 --- a/include/asm-sh/mman.h +++ b/include/asm-sh/mman.h @@ -35,6 +35,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h index 927a2bc..f3ce5c0 100644 --- a/include/asm-sh64/atomic.h +++ b/include/asm-sh64/atomic.h @@ -152,4 +152,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __ASM_SH64_ATOMIC_H */ diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h index 62bec7a..accb496 100644 --- a/include/asm-sparc/atomic.h +++ b/include/asm-sparc/atomic.h @@ -159,4 +159,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v) #endif /* !(__KERNEL__) */ +#include <asm-generic/atomic.h> #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h index 138eb81..98435ad 100644 --- a/include/asm-sparc/mman.h +++ b/include/asm-sparc/mman.h @@ -54,6 +54,7 @@ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ +#define MADV_REMOVE 0x6 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index 3789fe3..11f5aa5 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -96,4 +96,5 @@ extern int atomic64_sub_ret(int, atomic64_t *); #define smp_mb__after_atomic_inc() barrier() #endif +#include <asm-generic/atomic.h> #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h index 01cecf5..cb4b615 100644 --- a/include/asm-sparc64/mman.h +++ b/include/asm-sparc64/mman.h @@ -54,6 +54,7 @@ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ +#define MADV_REMOVE 
0x6 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h index bede317..f5b9ab6 100644 --- a/include/asm-v850/atomic.h +++ b/include/asm-v850/atomic.h @@ -126,4 +126,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __V850_ATOMIC_H__ */ diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h index e2b9008..edc7996 100644 --- a/include/asm-v850/mman.h +++ b/include/asm-v850/mman.h @@ -32,6 +32,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 50db9f3..72eb071 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -424,4 +424,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \ #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h index b3189fb..d32f7f5 100644 --- a/include/asm-x86_64/cacheflush.h +++ b/include/asm-x86_64/cacheflush.h @@ -27,4 +27,8 @@ void global_flush_tlb(void); int change_page_attr(struct page *page, int numpages, pgprot_t prot); int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +#endif + #endif /* _X8664_CACHEFLUSH_H */ diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h index 78e60a4..d0e97b7 100644 --- a/include/asm-x86_64/mman.h +++ b/include/asm-x86_64/mman.h @@ -36,6 +36,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h index 6f8a17d..10248a9 100644 --- a/include/asm-x86_64/mpspec.h +++ b/include/asm-x86_64/mpspec.h @@ -76,7 +76,7 @@ struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; - unsigned char mpc_bustype[6] __attribute((packed)); + unsigned char mpc_bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. 
*/ diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index ecf58c7..02888d7 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long #define pte_same(a, b) ((a).pte == (b).pte) +#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) + #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PUD_SIZE (1UL << PUD_SHIFT) diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index 85348e0..b34cc2e 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -315,6 +315,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #define local_irq_enable() __asm__ __volatile__("sti": : :"memory") /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") +/* used when interrupts are already enabled or to shut down the processor */ +#define halt() __asm__ __volatile__("hlt": : :"memory") #define irqs_disabled() \ ({ \ diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index 3670cc7..e2ce06b 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -286,6 +286,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() +#include <asm-generic/atomic.h> #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h index 9a95a45..082a750 100644 --- a/include/asm-xtensa/mman.h +++ b/include/asm-xtensa/mman.h @@ -72,6 +72,7 @@ #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ #define MADV_WILLNEED 0x3 /* pre-fault pages */ #define MADV_DONTNEED 0x4 /* discard these pages */ +#define MADV_REMOVE 0x5 /* remove these pages & resources */ /* compatibility flags */ #define MAP_ANON MAP_ANONYMOUS diff --git a/include/keys/user-type.h b/include/keys/user-type.h index 26f6ec3..a3dae18 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -35,7 +35,6 @@ struct user_key_payload { extern struct key_type key_type_user; extern int user_instantiate(struct key *key, const void *data, size_t datalen); -extern int user_duplicate(struct key *key, const struct key *source); extern int user_update(struct key *key, const void *data, size_t datalen); extern int user_match(const struct key *key, const void *criterion); extern void user_destroy(struct key *key); diff --git a/include/linux/ata.h b/include/linux/ata.h index f63dad4..94f77cc 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -143,6 +143,8 @@ enum { ATA_CMD_PACKET = 0xA0, ATA_CMD_VERIFY = 0x40, ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_STANDBYNOW1 = 0xE0, + ATA_CMD_IDLEIMMEDIATE = 0xE1, ATA_CMD_INIT_DEV_PARAMS = 0x91, /* SETFEATURES stuff */ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 3b03b0b..993da8c 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -43,50 +43,38 @@ typedef struct bootmem_data { extern unsigned long __init bootmem_bootmap_pages (unsigned long); extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend); extern void __init free_bootmem (unsigned long addr, unsigned long size); -extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit); +extern void * __init __alloc_bootmem 
(unsigned long size, unsigned long align, unsigned long goal); +extern void * __init __alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal); +extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE extern void __init reserve_bootmem (unsigned long addr, unsigned long size); #define alloc_bootmem(x) \ __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ - __alloc_bootmem((x), SMP_CACHE_BYTES, 0) + __alloc_bootmem_low((x), SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ - __alloc_bootmem((x), PAGE_SIZE, 0) - -#define alloc_bootmem_limit(x, limit) \ - __alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit)) -#define alloc_bootmem_low_limit(x, limit) \ - __alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit)) -#define alloc_bootmem_pages_limit(x, limit) \ - __alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit)) -#define alloc_bootmem_low_pages_limit(x, limit) \ - __alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit)) - + __alloc_bootmem_low((x), PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ extern unsigned long __init free_all_bootmem (void); - +extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn); extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size); extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size); extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat); -extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit); #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages_node(pgdat, x) \ - __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0) - -#define alloc_bootmem_node_limit(pgdat, x, limit) \ - __alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit)) -#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \ - __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit)) -#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \ - __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit)) - + __alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP @@ -123,15 +111,5 @@ extern void *__init alloc_large_system_hash(const char *tablename, #endif extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? 
*/ -static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal) -{ - return __alloc_bootmem_limit(size, align, goal, 0); -} - -static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal) -{ - return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0); -} #endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index f5eb6b6..fa75ba0 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -272,9 +272,9 @@ typedef char ioctl_struct[308]; #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 4 +#define DM_VERSION_MINOR 5 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2005-01-12)" +#define DM_VERSION_EXTRA "-ioctl (2005-10-04)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -301,8 +301,13 @@ typedef char ioctl_struct[308]; #define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ /* - * Set this to improve performance when you aren't going to use open_count + * Set this to improve performance when you aren't going to use open_count. */ #define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ +/* + * Set this to avoid attempting to freeze any filesystem when suspending. + */ +#define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index ed9a41a..115e72b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1050,6 +1050,7 @@ struct inode_operations { ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); + void (*truncate_range)(struct inode *, loff_t, loff_t); }; struct seq_file; diff --git a/include/linux/fuse.h b/include/linux/fuse.h index b76b558..528959c 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -14,7 +14,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 3 +#define FUSE_KERNEL_MINOR_VERSION 5 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -53,6 +53,9 @@ struct fuse_kstatfs { __u64 ffree; __u32 bsize; __u32 namelen; + __u32 frsize; + __u32 padding; + __u32 spare[6]; }; #define FATTR_MODE (1 << 0) @@ -105,12 +108,8 @@ enum fuse_opcode { FUSE_CREATE = 35 }; -/* Conservative buffer size for the client */ -#define FUSE_MAX_IN 8192 - -#define FUSE_NAME_MAX 1024 -#define FUSE_SYMLINK_MAX 4096 -#define FUSE_XATTR_SIZE_MAX 4096 +/* The read buffer is required to be at least 8k, but may be much larger */ +#define FUSE_MIN_READ_BUFFER 8192 struct fuse_entry_out { __u64 nodeid; /* Inode ID */ @@ -213,6 +212,8 @@ struct fuse_write_out { __u32 padding; }; +#define FUSE_COMPAT_STATFS_SIZE 48 + struct fuse_statfs_out { struct fuse_kstatfs st; }; @@ -243,9 +244,16 @@ struct fuse_access_in { __u32 padding; }; -struct fuse_init_in_out { +struct fuse_init_in { + __u32 major; + __u32 minor; +}; + +struct fuse_init_out { __u32 major; __u32 minor; + __u32 unused[3]; + __u32 max_write; }; struct fuse_in_header { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 1056717..68d82ad 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -22,7 +22,7 @@ int hugetlb_report_meminfo(char *); int hugetlb_report_node_meminfo(int, char *); int is_hugepage_mem_enough(size_t); unsigned long hugetlb_total_pages(void); -struct page 
*alloc_huge_page(void); +struct page *alloc_huge_page(struct vm_area_struct *, unsigned long); void free_huge_page(struct page *); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); @@ -97,7 +97,7 @@ static inline unsigned long hugetlb_total_pages(void) #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ do { } while (0) -#define alloc_huge_page() ({ NULL; }) +#define alloc_huge_page(vma, addr) ({ NULL; }) #define free_huge_page(p) ({ (void)(p); BUG(); }) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) diff --git a/include/linux/i2o.h b/include/linux/i2o.h index d79c8a4..9ba8067 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -30,6 +30,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/workqueue.h> /* work_struct */ +#include <linux/mempool.h> #include <asm/io.h> #include <asm/semaphore.h> /* Needed for MUTEX init macros */ @@ -38,6 +39,355 @@ #define I2O_QUEUE_EMPTY 0xffffffff /* + * Cache strategies + */ + +/* The NULL strategy leaves everything up to the controller. This tends to be a + * pessimal but functional choice. + */ +#define CACHE_NULL 0 +/* Prefetch data when reading. We continually attempt to load the next 32 sectors + * into the controller cache. + */ +#define CACHE_PREFETCH 1 +/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors + * into the controller cache. When an I/O is <= 8K we assume it's probably + * not sequential and don't prefetch (default). + */ +#define CACHE_SMARTFETCH 2 +/* Data is written to the cache and then out onto the disk. The I/O must be + * physically on the medium before the write is acknowledged (default without + * NVRAM) + */ +#define CACHE_WRITETHROUGH 17 +/* Data is written to the cache and then out onto the disk. The controller + * is permitted to write back the cache any way it wants. (default if battery-backed + * NVRAM is present). It can be useful to set this for swap regardless of + * battery state. + */ +#define CACHE_WRITEBACK 18 +/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We + * write large I/Os directly to disk bypassing the cache to avoid the extra + * memory copy hits. Small writes are writeback cached. + */ +#define CACHE_SMARTBACK 19 +/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We + * write large I/Os directly to disk bypassing the cache to avoid the extra + * memory copy hits. Small writes are writethrough cached. 
Suitable for devices + * lacking battery backup + */ +#define CACHE_SMARTTHROUGH 20 + +/* + * Ioctl structures + */ + +#define BLKI2OGRSTRAT _IOR('2', 1, int) +#define BLKI2OGWSTRAT _IOR('2', 2, int) +#define BLKI2OSRSTRAT _IOW('2', 3, int) +#define BLKI2OSWSTRAT _IOW('2', 4, int) + +/* + * I2O Function codes + */ + +/* + * Executive Class + */ +#define I2O_CMD_ADAPTER_ASSIGN 0xB3 +#define I2O_CMD_ADAPTER_READ 0xB2 +#define I2O_CMD_ADAPTER_RELEASE 0xB5 +#define I2O_CMD_BIOS_INFO_SET 0xA5 +#define I2O_CMD_BOOT_DEVICE_SET 0xA7 +#define I2O_CMD_CONFIG_VALIDATE 0xBB +#define I2O_CMD_CONN_SETUP 0xCA +#define I2O_CMD_DDM_DESTROY 0xB1 +#define I2O_CMD_DDM_ENABLE 0xD5 +#define I2O_CMD_DDM_QUIESCE 0xC7 +#define I2O_CMD_DDM_RESET 0xD9 +#define I2O_CMD_DDM_SUSPEND 0xAF +#define I2O_CMD_DEVICE_ASSIGN 0xB7 +#define I2O_CMD_DEVICE_RELEASE 0xB9 +#define I2O_CMD_HRT_GET 0xA8 +#define I2O_CMD_ADAPTER_CLEAR 0xBE +#define I2O_CMD_ADAPTER_CONNECT 0xC9 +#define I2O_CMD_ADAPTER_RESET 0xBD +#define I2O_CMD_LCT_NOTIFY 0xA2 +#define I2O_CMD_OUTBOUND_INIT 0xA1 +#define I2O_CMD_PATH_ENABLE 0xD3 +#define I2O_CMD_PATH_QUIESCE 0xC5 +#define I2O_CMD_PATH_RESET 0xD7 +#define I2O_CMD_STATIC_MF_CREATE 0xDD +#define I2O_CMD_STATIC_MF_RELEASE 0xDF +#define I2O_CMD_STATUS_GET 0xA0 +#define I2O_CMD_SW_DOWNLOAD 0xA9 +#define I2O_CMD_SW_UPLOAD 0xAB +#define I2O_CMD_SW_REMOVE 0xAD +#define I2O_CMD_SYS_ENABLE 0xD1 +#define I2O_CMD_SYS_MODIFY 0xC1 +#define I2O_CMD_SYS_QUIESCE 0xC3 +#define I2O_CMD_SYS_TAB_SET 0xA3 + +/* + * Utility Class + */ +#define I2O_CMD_UTIL_NOP 0x00 +#define I2O_CMD_UTIL_ABORT 0x01 +#define I2O_CMD_UTIL_CLAIM 0x09 +#define I2O_CMD_UTIL_RELEASE 0x0B +#define I2O_CMD_UTIL_PARAMS_GET 0x06 +#define I2O_CMD_UTIL_PARAMS_SET 0x05 +#define I2O_CMD_UTIL_EVT_REGISTER 0x13 +#define I2O_CMD_UTIL_EVT_ACK 0x14 +#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10 +#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D +#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F +#define I2O_CMD_UTIL_LOCK 0x17 +#define I2O_CMD_UTIL_LOCK_RELEASE 0x19 +#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15 + +/* + * SCSI Host Bus Adapter Class + */ +#define I2O_CMD_SCSI_EXEC 0x81 +#define I2O_CMD_SCSI_ABORT 0x83 +#define I2O_CMD_SCSI_BUSRESET 0x27 + +/* + * Bus Adapter Class + */ +#define I2O_CMD_BUS_ADAPTER_RESET 0x85 +#define I2O_CMD_BUS_RESET 0x87 +#define I2O_CMD_BUS_SCAN 0x89 +#define I2O_CMD_BUS_QUIESCE 0x8b + +/* + * Random Block Storage Class + */ +#define I2O_CMD_BLOCK_READ 0x30 +#define I2O_CMD_BLOCK_WRITE 0x31 +#define I2O_CMD_BLOCK_CFLUSH 0x37 +#define I2O_CMD_BLOCK_MLOCK 0x49 +#define I2O_CMD_BLOCK_MUNLOCK 0x4B +#define I2O_CMD_BLOCK_MMOUNT 0x41 +#define I2O_CMD_BLOCK_MEJECT 0x43 +#define I2O_CMD_BLOCK_POWER 0x70 + +#define I2O_CMD_PRIVATE 0xFF + +/* Command status values */ + +#define I2O_CMD_IN_PROGRESS 0x01 +#define I2O_CMD_REJECTED 0x02 +#define I2O_CMD_FAILED 0x03 +#define I2O_CMD_COMPLETED 0x04 + +/* I2O API function return values */ + +#define I2O_RTN_NO_ERROR 0 +#define I2O_RTN_NOT_INIT 1 +#define I2O_RTN_FREE_Q_EMPTY 2 +#define I2O_RTN_TCB_ERROR 3 +#define I2O_RTN_TRANSACTION_ERROR 4 +#define I2O_RTN_ADAPTER_ALREADY_INIT 5 +#define I2O_RTN_MALLOC_ERROR 6 +#define I2O_RTN_ADPTR_NOT_REGISTERED 7 +#define I2O_RTN_MSG_REPLY_TIMEOUT 8 +#define I2O_RTN_NO_STATUS 9 +#define I2O_RTN_NO_FIRM_VER 10 +#define I2O_RTN_NO_LINK_SPEED 11 + +/* Reply message status defines for all messages */ + +#define I2O_REPLY_STATUS_SUCCESS 0x00 +#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 +#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 +#define 
I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 +#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 +#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 +#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06 +#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08 +#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09 +#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A +#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B +#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80 + +/* Status codes and Error Information for Parameter functions */ + +#define I2O_PARAMS_STATUS_SUCCESS 0x00 +#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 +#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 +#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 +#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 +#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 +#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06 +#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07 +#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08 +#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09 +#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A +#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B +#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C +#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D +#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E +#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F +#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10 + +/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error + * messages: Table 3-2 Detailed Status Codes.*/ + +#define I2O_DSC_SUCCESS 0x0000 +#define I2O_DSC_BAD_KEY 0x0002 +#define I2O_DSC_TCL_ERROR 0x0003 +#define I2O_DSC_REPLY_BUFFER_FULL 0x0004 +#define I2O_DSC_NO_SUCH_PAGE 0x0005 +#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006 +#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007 +#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009 +#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A +#define I2O_DSC_DEVICE_LOCKED 0x000B +#define I2O_DSC_DEVICE_RESET 0x000C +#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D +#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E +#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F +#define I2O_DSC_INVALID_OFFSET 0x0010 +#define I2O_DSC_INVALID_PARAMETER 0x0011 +#define I2O_DSC_INVALID_REQUEST 0x0012 +#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013 +#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014 +#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015 +#define I2O_DSC_MISSING_PARAMETER 0x0016 +#define I2O_DSC_TIMEOUT 0x0017 +#define I2O_DSC_UNKNOWN_ERROR 0x0018 +#define I2O_DSC_UNKNOWN_FUNCTION 0x0019 +#define I2O_DSC_UNSUPPORTED_VERSION 0x001A +#define I2O_DSC_DEVICE_BUSY 0x001B +#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C + +/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed + Status Codes.*/ + +#define I2O_BSA_DSC_SUCCESS 0x0000 +#define I2O_BSA_DSC_MEDIA_ERROR 0x0001 +#define I2O_BSA_DSC_ACCESS_ERROR 0x0002 +#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003 +#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004 +#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005 +#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006 +#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007 +#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008 +#define I2O_BSA_DSC_BUS_FAILURE 0x0009 +#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A +#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B +#define I2O_BSA_DSC_DEVICE_RESET 0x000C +#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D +#define I2O_BSA_DSC_TIMEOUT 0x000E + +/* FailureStatusCodes, Table 3-3 Message Failure Codes */ + +#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81 +#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82 +#define I2O_FSC_TRANSPORT_CONGESTION 0x83 +#define 
I2O_FSC_TRANSPORT_FAILURE 0x84 +#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85 +#define I2O_FSC_TRANSPORT_TIME_OUT 0x86 +#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87 +#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88 +#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89 +#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A +#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B +#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C +#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D +#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E +#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F +#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF + +/* Device Claim Types */ +#define I2O_CLAIM_PRIMARY 0x01000000 +#define I2O_CLAIM_MANAGEMENT 0x02000000 +#define I2O_CLAIM_AUTHORIZED 0x03000000 +#define I2O_CLAIM_SECONDARY 0x04000000 + +/* Message header defines for VersionOffset */ +#define I2OVER15 0x0001 +#define I2OVER20 0x0002 + +/* Default is 1.5 */ +#define I2OVERSION I2OVER15 + +#define SGL_OFFSET_0 I2OVERSION +#define SGL_OFFSET_4 (0x0040 | I2OVERSION) +#define SGL_OFFSET_5 (0x0050 | I2OVERSION) +#define SGL_OFFSET_6 (0x0060 | I2OVERSION) +#define SGL_OFFSET_7 (0x0070 | I2OVERSION) +#define SGL_OFFSET_8 (0x0080 | I2OVERSION) +#define SGL_OFFSET_9 (0x0090 | I2OVERSION) +#define SGL_OFFSET_10 (0x00A0 | I2OVERSION) +#define SGL_OFFSET_11 (0x00B0 | I2OVERSION) +#define SGL_OFFSET_12 (0x00C0 | I2OVERSION) +#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION) + +/* Transaction Reply Lists (TRL) Control Word structure */ +#define TRL_SINGLE_FIXED_LENGTH 0x00 +#define TRL_SINGLE_VARIABLE_LENGTH 0x40 +#define TRL_MULTIPLE_FIXED_LENGTH 0x80 + + /* msg header defines for MsgFlags */ +#define MSG_STATIC 0x0100 +#define MSG_64BIT_CNTXT 0x0200 +#define MSG_MULTI_TRANS 0x1000 +#define MSG_FAIL 0x2000 +#define MSG_FINAL 0x4000 +#define MSG_REPLY 0x8000 + + /* minimum size msg */ +#define THREE_WORD_MSG_SIZE 0x00030000 +#define FOUR_WORD_MSG_SIZE 0x00040000 +#define FIVE_WORD_MSG_SIZE 0x00050000 +#define SIX_WORD_MSG_SIZE 0x00060000 +#define SEVEN_WORD_MSG_SIZE 0x00070000 +#define EIGHT_WORD_MSG_SIZE 0x00080000 +#define NINE_WORD_MSG_SIZE 0x00090000 +#define TEN_WORD_MSG_SIZE 0x000A0000 +#define ELEVEN_WORD_MSG_SIZE 0x000B0000 +#define I2O_MESSAGE_SIZE(x) ((x)<<16) + +/* special TID assignments */ +#define ADAPTER_TID 0 +#define HOST_TID 1 + +/* outbound queue defines */ +#define I2O_MAX_OUTBOUND_MSG_FRAMES 128 +#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ + +/* inbound queue definitions */ +#define I2O_MSG_INPOOL_MIN 32 +#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ + +#define I2O_POST_WAIT_OK 0 +#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT + +#define I2O_CONTEXT_LIST_MIN_LENGTH 15 +#define I2O_CONTEXT_LIST_USED 0x01 +#define I2O_CONTEXT_LIST_DELETED 0x02 + +/* timeouts */ +#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15 +#define I2O_TIMEOUT_MESSAGE_GET 5 +#define I2O_TIMEOUT_RESET 30 +#define I2O_TIMEOUT_STATUS_GET 5 +#define I2O_TIMEOUT_LCT_GET 360 +#define I2O_TIMEOUT_SCSI_SCB_ABORT 240 + +/* retries */ +#define I2O_HRT_GET_TRIES 3 +#define I2O_LCT_GET_TRIES 3 + +/* defines for max_sectors and max_phys_segments */ +#define I2O_MAX_SECTORS 1024 +#define I2O_MAX_SECTORS_LIMITED 128 +#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS + +/* * Message structures */ struct i2o_message { @@ -58,6 +408,12 @@ struct i2o_message { u32 body[0]; }; +/* MFA and I2O message used by mempool */ +struct i2o_msg_mfa { + u32 mfa; /* MFA returned by the controller */ + struct i2o_message msg; /* I2O message */ +}; + /* * Each I2O device entity 
has one of these. There is one per device. */ @@ -130,6 +486,15 @@ struct i2o_dma { }; /* + * Contains slab cache and mempool information + */ +struct i2o_pool { + char *name; + kmem_cache_t *slab; + mempool_t *mempool; +}; + +/* * Contains IO mapped address information */ struct i2o_io { @@ -174,8 +539,6 @@ struct i2o_controller { void __iomem *irq_status; /* Interrupt status register address */ void __iomem *irq_mask; /* Interrupt mask register address */ - /* Dynamic LCT related data */ - struct i2o_dma status; /* IOP status block */ struct i2o_dma hrt; /* HW Resource Table */ @@ -188,6 +551,8 @@ struct i2o_controller { struct i2o_io in_queue; /* inbound message queue Host->IOP */ struct i2o_dma out_queue; /* outbound message queue IOP->Host */ + struct i2o_pool in_msg; /* mempool for inbound messages */ + unsigned int battery:1; /* Has a battery backup */ unsigned int io_alloc:1; /* An I/O resource was allocated */ unsigned int mem_alloc:1; /* A memory resource was allocated */ @@ -196,7 +561,6 @@ struct i2o_controller { struct resource mem_resource; /* Mem resource allocated to the IOP */ struct device device; - struct class_device *classdev; /* I2O controller class device */ struct i2o_device *exec; /* Executive */ #if BITS_PER_LONG == 64 spinlock_t context_list_lock; /* lock for context_list */ @@ -247,16 +611,13 @@ struct i2o_sys_tbl { extern struct list_head i2o_controllers; /* Message functions */ -static inline u32 i2o_msg_get(struct i2o_controller *, - struct i2o_message __iomem **); -extern u32 i2o_msg_get_wait(struct i2o_controller *, - struct i2o_message __iomem **, int); -static inline void i2o_msg_post(struct i2o_controller *, u32); -static inline int i2o_msg_post_wait(struct i2o_controller *, u32, - unsigned long); -extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long, - struct i2o_dma *); -extern void i2o_msg_nop(struct i2o_controller *, u32); +static inline struct i2o_message *i2o_msg_get(struct i2o_controller *); +extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); +static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *); +static inline int i2o_msg_post_wait(struct i2o_controller *, + struct i2o_message *, unsigned long); +extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, + unsigned long, struct i2o_dma *); static inline void i2o_flush_reply(struct i2o_controller *, u32); /* IOP functions */ @@ -384,10 +745,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, size_t size, enum dma_data_direction direction, - u32 __iomem ** sg_ptr) + u32 ** sg_ptr) { u32 sg_flags; - u32 __iomem *mptr = *sg_ptr; + u32 *mptr = *sg_ptr; dma_addr_t dma_addr; switch (direction) { @@ -405,16 +766,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, if (!dma_mapping_error(dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { - writel(0x7C020002, mptr++); - writel(PAGE_SIZE, mptr++); + *mptr++ = cpu_to_le32(0x7C020002); + *mptr++ = cpu_to_le32(PAGE_SIZE); } #endif - writel(sg_flags | size, mptr++); - writel(i2o_dma_low(dma_addr), mptr++); + *mptr++ = cpu_to_le32(sg_flags | size); + *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) - writel(i2o_dma_high(dma_addr), mptr++); + *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); #endif 
*sg_ptr = mptr; } @@ -439,10 +800,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, static inline int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg, int sg_count, enum dma_data_direction direction, - u32 __iomem ** sg_ptr) + u32 ** sg_ptr) { u32 sg_flags; - u32 __iomem *mptr = *sg_ptr; + u32 *mptr = *sg_ptr; switch (direction) { case DMA_TO_DEVICE: @@ -461,19 +822,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c, #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { - writel(0x7C020002, mptr++); - writel(PAGE_SIZE, mptr++); + *mptr++ = cpu_to_le32(0x7C020002); + *mptr++ = cpu_to_le32(PAGE_SIZE); } #endif while (sg_count-- > 0) { if (!sg_count) sg_flags |= 0xC0000000; - writel(sg_flags | sg_dma_len(sg), mptr++); - writel(i2o_dma_low(sg_dma_address(sg)), mptr++); + *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); + *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) - writel(i2o_dma_high(sg_dma_address(sg)), mptr++); + *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); #endif sg++; } @@ -563,6 +924,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, return 0; }; +/* + * i2o_pool_alloc - Allocate a slab cache and mempool + * @pool: pointer to struct i2o_pool to write data into. + * @name: name which is used to identify the cache + * @size: size of each object + * @min_nr: minimum number of objects + * + * First allocates a slab cache with name and size. Then allocates a + * mempool which uses the slab cache for allocation and freeing. + * + * Returns 0 on success or negative error code on failure. + */ +static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, + size_t size, int min_nr) +{ + pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); + if (!pool->name) + goto exit; + strcpy(pool->name, name); + + pool->slab = + kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL, + NULL); + if (!pool->slab) + goto free_name; + + pool->mempool = + mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + pool->slab); + if (!pool->mempool) + goto free_slab; + + return 0; + + free_slab: + kmem_cache_destroy(pool->slab); + + free_name: + kfree(pool->name); + + exit: + return -ENOMEM; +}; + +/* + * i2o_pool_free - Free the slab cache and mempool + * @pool: pointer to struct i2o_pool which should be freed + * + * Note that you have to return all objects to the mempool before + * calling i2o_pool_free(). + */ +static inline void i2o_pool_free(struct i2o_pool *pool) +{ + mempool_destroy(pool->mempool); + kmem_cache_destroy(pool->slab); + kfree(pool->name); +}; + /* I2O driver (OSM) functions */ extern int i2o_driver_register(struct i2o_driver *); extern void i2o_driver_unregister(struct i2o_driver *); @@ -638,39 +1057,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *); #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) /** + * i2o_msg_out_to_virt - Turn an I2O message to a virtual address + * @c: controller + * @m: message engine value + * + * Turn a receive message from an I2O controller bus address into + * a Linux virtual address. The shared page frame is a linear block + * so we simply have to shift the offset. This function does not + * work for sender side messages as they are ioremap objects + * provided by the I2O controller.
+ */ +static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, + u32 m) +{ + BUG_ON(m < c->out_queue.phys + || m >= c->out_queue.phys + c->out_queue.len); + + return c->out_queue.virt + (m - c->out_queue.phys); +}; + +/** + * i2o_msg_in_to_virt - Turn an I2O message to a virtual address + * @c: controller + * @m: message engine value + * + * Turn a send message from an I2O controller bus address into + * a Linux virtual address. The shared page frame is a linear block + * so we simply have to shift the offset. This function does not + * work for receive side messages as they are kmalloc objects + * in a different pool. + */ +static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct + i2o_controller *c, + u32 m) +{ + return c->in_queue.virt + m; +}; + +/** * i2o_msg_get - obtain an I2O message from the IOP * @c: I2O controller - * @msg: pointer to a I2O message pointer * - * This function tries to get a message slot. If no message slot is + * This function tries to get a message frame. If no message frame is * available do not wait until one is availabe (see also i2o_msg_get_wait). + * The returned pointer to the message frame is not in I/O memory, it is + * allocated from a mempool. But because a MFA is allocated from the + * controller too it is guaranteed that i2o_msg_post() will never fail. * - * On a success the message is returned and the pointer to the message is - * set in msg. The returned message is the physical page frame offset - * address from the read port (see the i2o spec). If no message is - * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. + * On a success a pointer to the message frame is returned. If the message + * queue is empty -EBUSY is returned and if no memory is available -ENOMEM + * is returned. */ -static inline u32 i2o_msg_get(struct i2o_controller *c, - struct i2o_message __iomem ** msg) +static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c) { - u32 m = readl(c->in_port); - - if (m != I2O_QUEUE_EMPTY) - *msg = c->in_queue.virt + m; + struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC); + if (!mmsg) + return ERR_PTR(-ENOMEM); + + mmsg->mfa = readl(c->in_port); + if (mmsg->mfa == I2O_QUEUE_EMPTY) { + mempool_free(mmsg, c->in_msg.mempool); + return ERR_PTR(-EBUSY); + } - return m; + return &mmsg->msg; }; /** * i2o_msg_post - Post I2O message to I2O controller * @c: I2O controller to which the message should be send - * @m: the message identifier + * @msg: message returned by i2o_msg_get() * - * Post the message to the I2O controller. + * Post the message to the I2O controller and return immediately. */ -static inline void i2o_msg_post(struct i2o_controller *c, u32 m) +static inline void i2o_msg_post(struct i2o_controller *c, + struct i2o_message *msg) { - writel(m, c->in_port); + struct i2o_msg_mfa *mmsg; + + mmsg = container_of(msg, struct i2o_msg_mfa, msg); + memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg, + (le32_to_cpu(msg->u.head[0]) >> 16) << 2); + writel(mmsg->mfa, c->in_port); + mempool_free(mmsg, c->in_msg.mempool); }; /** @@ -685,62 +1154,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m) * * Returns 0 on success or negative error code on failure. 
*/ -static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m, +static inline int i2o_msg_post_wait(struct i2o_controller *c, + struct i2o_message *msg, unsigned long timeout) { - return i2o_msg_post_wait_mem(c, m, timeout, NULL); + return i2o_msg_post_wait_mem(c, msg, timeout, NULL); }; /** - * i2o_flush_reply - Flush reply from I2O controller - * @c: I2O controller - * @m: the message identifier + * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller + * @c: I2O controller from which the MFA was fetched + * @mfa: MFA which should be returned * - * The I2O controller must be informed that the reply message is not needed - * anymore. If you forget to flush the reply, the message frame can't be - * used by the controller anymore and is therefore lost. + * This function must be used for preserved messages, because i2o_msg_nop() + * also returns the allocated memory back to the msg_pool mempool. */ -static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) +static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa) { - writel(m, c->out_port); + struct i2o_message __iomem *msg; + u32 nop[3] = { + THREE_WORD_MSG_SIZE | SGL_OFFSET_0, + I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, + 0x00000000 + }; + + msg = i2o_msg_in_to_virt(c, mfa); + memcpy_toio(msg, nop, sizeof(nop)); + writel(mfa, c->in_port); }; /** - * i2o_out_to_virt - Turn an I2O message to a virtual address - * @c: controller - * @m: message engine value + * i2o_msg_nop - Returns a message which is not used + * @c: I2O controller from which the message was created + * @msg: message which should be returned * - * Turn a receive message from an I2O controller bus address into - * a Linux virtual address. The shared page frame is a linear block - * so we simply have to shift the offset. This function does not - * work for sender side messages as they are ioremap objects - * provided by the I2O controller. + * If you fetch a message via i2o_msg_get, and can't use it, you must + * return the message with this function. Otherwise the MFA is lost as well + * as the allocated memory from the mempool. */ -static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, - u32 m) +static inline void i2o_msg_nop(struct i2o_controller *c, + struct i2o_message *msg) { - BUG_ON(m < c->out_queue.phys - || m >= c->out_queue.phys + c->out_queue.len); + struct i2o_msg_mfa *mmsg; + mmsg = container_of(msg, struct i2o_msg_mfa, msg); - return c->out_queue.virt + (m - c->out_queue.phys); + i2o_msg_nop_mfa(c, mmsg->mfa); + mempool_free(mmsg, c->in_msg.mempool); }; /** - * i2o_msg_in_to_virt - Turn an I2O message to a virtual address - * @c: controller - * @m: message engine value + * i2o_flush_reply - Flush reply from I2O controller + * @c: I2O controller + * @m: the message identifier * - * Turn a send message from an I2O controller bus address into - * a Linux virtual address. The shared page frame is a linear block - * so we simply have to shift the offset. This function does not - * work for receive side messages as they are kmalloc objects - * in a different pool. + * The I2O controller must be informed that the reply message is not needed + * anymore. If you forget to flush the reply, the message frame can't be + * used by the controller anymore and is therefore lost. 
*/ -static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct - i2o_controller *c, - u32 m) +static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) { - return c->in_queue.virt + m; + writel(m, c->out_port); }; /* @@ -779,350 +1252,5 @@ extern void i2o_dump_message(struct i2o_message *); extern void i2o_dump_hrt(struct i2o_controller *c); extern void i2o_debug_state(struct i2o_controller *c); -/* - * Cache strategies - */ - -/* The NULL strategy leaves everything up to the controller. This tends to be a - * pessimal but functional choice. - */ -#define CACHE_NULL 0 -/* Prefetch data when reading. We continually attempt to load the next 32 sectors - * into the controller cache. - */ -#define CACHE_PREFETCH 1 -/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors - * into the controller cache. When an I/O is less <= 8K we assume its probably - * not sequential and don't prefetch (default) - */ -#define CACHE_SMARTFETCH 2 -/* Data is written to the cache and then out on to the disk. The I/O must be - * physically on the medium before the write is acknowledged (default without - * NVRAM) - */ -#define CACHE_WRITETHROUGH 17 -/* Data is written to the cache and then out on to the disk. The controller - * is permitted to write back the cache any way it wants. (default if battery - * backed NVRAM is present). It can be useful to set this for swap regardless of - * battery state. - */ -#define CACHE_WRITEBACK 18 -/* Optimise for under powered controllers, especially on RAID1 and RAID0. We - * write large I/O's directly to disk bypassing the cache to avoid the extra - * memory copy hits. Small writes are writeback cached - */ -#define CACHE_SMARTBACK 19 -/* Optimise for under powered controllers, especially on RAID1 and RAID0. We - * write large I/O's directly to disk bypassing the cache to avoid the extra - * memory copy hits. Small writes are writethrough cached. 
Suitable for devices - * lacking battery backup - */ -#define CACHE_SMARTTHROUGH 20 - -/* - * Ioctl structures - */ - -#define BLKI2OGRSTRAT _IOR('2', 1, int) -#define BLKI2OGWSTRAT _IOR('2', 2, int) -#define BLKI2OSRSTRAT _IOW('2', 3, int) -#define BLKI2OSWSTRAT _IOW('2', 4, int) - -/* - * I2O Function codes - */ - -/* - * Executive Class - */ -#define I2O_CMD_ADAPTER_ASSIGN 0xB3 -#define I2O_CMD_ADAPTER_READ 0xB2 -#define I2O_CMD_ADAPTER_RELEASE 0xB5 -#define I2O_CMD_BIOS_INFO_SET 0xA5 -#define I2O_CMD_BOOT_DEVICE_SET 0xA7 -#define I2O_CMD_CONFIG_VALIDATE 0xBB -#define I2O_CMD_CONN_SETUP 0xCA -#define I2O_CMD_DDM_DESTROY 0xB1 -#define I2O_CMD_DDM_ENABLE 0xD5 -#define I2O_CMD_DDM_QUIESCE 0xC7 -#define I2O_CMD_DDM_RESET 0xD9 -#define I2O_CMD_DDM_SUSPEND 0xAF -#define I2O_CMD_DEVICE_ASSIGN 0xB7 -#define I2O_CMD_DEVICE_RELEASE 0xB9 -#define I2O_CMD_HRT_GET 0xA8 -#define I2O_CMD_ADAPTER_CLEAR 0xBE -#define I2O_CMD_ADAPTER_CONNECT 0xC9 -#define I2O_CMD_ADAPTER_RESET 0xBD -#define I2O_CMD_LCT_NOTIFY 0xA2 -#define I2O_CMD_OUTBOUND_INIT 0xA1 -#define I2O_CMD_PATH_ENABLE 0xD3 -#define I2O_CMD_PATH_QUIESCE 0xC5 -#define I2O_CMD_PATH_RESET 0xD7 -#define I2O_CMD_STATIC_MF_CREATE 0xDD -#define I2O_CMD_STATIC_MF_RELEASE 0xDF -#define I2O_CMD_STATUS_GET 0xA0 -#define I2O_CMD_SW_DOWNLOAD 0xA9 -#define I2O_CMD_SW_UPLOAD 0xAB -#define I2O_CMD_SW_REMOVE 0xAD -#define I2O_CMD_SYS_ENABLE 0xD1 -#define I2O_CMD_SYS_MODIFY 0xC1 -#define I2O_CMD_SYS_QUIESCE 0xC3 -#define I2O_CMD_SYS_TAB_SET 0xA3 - -/* - * Utility Class - */ -#define I2O_CMD_UTIL_NOP 0x00 -#define I2O_CMD_UTIL_ABORT 0x01 -#define I2O_CMD_UTIL_CLAIM 0x09 -#define I2O_CMD_UTIL_RELEASE 0x0B -#define I2O_CMD_UTIL_PARAMS_GET 0x06 -#define I2O_CMD_UTIL_PARAMS_SET 0x05 -#define I2O_CMD_UTIL_EVT_REGISTER 0x13 -#define I2O_CMD_UTIL_EVT_ACK 0x14 -#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10 -#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D -#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F -#define I2O_CMD_UTIL_LOCK 0x17 -#define I2O_CMD_UTIL_LOCK_RELEASE 0x19 -#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15 - -/* - * SCSI Host Bus Adapter Class - */ -#define I2O_CMD_SCSI_EXEC 0x81 -#define I2O_CMD_SCSI_ABORT 0x83 -#define I2O_CMD_SCSI_BUSRESET 0x27 - -/* - * Bus Adapter Class - */ -#define I2O_CMD_BUS_ADAPTER_RESET 0x85 -#define I2O_CMD_BUS_RESET 0x87 -#define I2O_CMD_BUS_SCAN 0x89 -#define I2O_CMD_BUS_QUIESCE 0x8b - -/* - * Random Block Storage Class - */ -#define I2O_CMD_BLOCK_READ 0x30 -#define I2O_CMD_BLOCK_WRITE 0x31 -#define I2O_CMD_BLOCK_CFLUSH 0x37 -#define I2O_CMD_BLOCK_MLOCK 0x49 -#define I2O_CMD_BLOCK_MUNLOCK 0x4B -#define I2O_CMD_BLOCK_MMOUNT 0x41 -#define I2O_CMD_BLOCK_MEJECT 0x43 -#define I2O_CMD_BLOCK_POWER 0x70 - -#define I2O_CMD_PRIVATE 0xFF - -/* Command status values */ - -#define I2O_CMD_IN_PROGRESS 0x01 -#define I2O_CMD_REJECTED 0x02 -#define I2O_CMD_FAILED 0x03 -#define I2O_CMD_COMPLETED 0x04 - -/* I2O API function return values */ - -#define I2O_RTN_NO_ERROR 0 -#define I2O_RTN_NOT_INIT 1 -#define I2O_RTN_FREE_Q_EMPTY 2 -#define I2O_RTN_TCB_ERROR 3 -#define I2O_RTN_TRANSACTION_ERROR 4 -#define I2O_RTN_ADAPTER_ALREADY_INIT 5 -#define I2O_RTN_MALLOC_ERROR 6 -#define I2O_RTN_ADPTR_NOT_REGISTERED 7 -#define I2O_RTN_MSG_REPLY_TIMEOUT 8 -#define I2O_RTN_NO_STATUS 9 -#define I2O_RTN_NO_FIRM_VER 10 -#define I2O_RTN_NO_LINK_SPEED 11 - -/* Reply message status defines for all messages */ - -#define I2O_REPLY_STATUS_SUCCESS 0x00 -#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 -#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 -#define 
I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 -#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 -#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 -#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06 -#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08 -#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09 -#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A -#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B -#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80 - -/* Status codes and Error Information for Parameter functions */ - -#define I2O_PARAMS_STATUS_SUCCESS 0x00 -#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 -#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 -#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 -#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 -#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 -#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06 -#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07 -#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08 -#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09 -#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A -#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B -#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C -#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D -#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E -#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F -#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10 - -/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error - * messages: Table 3-2 Detailed Status Codes.*/ - -#define I2O_DSC_SUCCESS 0x0000 -#define I2O_DSC_BAD_KEY 0x0002 -#define I2O_DSC_TCL_ERROR 0x0003 -#define I2O_DSC_REPLY_BUFFER_FULL 0x0004 -#define I2O_DSC_NO_SUCH_PAGE 0x0005 -#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006 -#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007 -#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009 -#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A -#define I2O_DSC_DEVICE_LOCKED 0x000B -#define I2O_DSC_DEVICE_RESET 0x000C -#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D -#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E -#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F -#define I2O_DSC_INVALID_OFFSET 0x0010 -#define I2O_DSC_INVALID_PARAMETER 0x0011 -#define I2O_DSC_INVALID_REQUEST 0x0012 -#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013 -#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014 -#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015 -#define I2O_DSC_MISSING_PARAMETER 0x0016 -#define I2O_DSC_TIMEOUT 0x0017 -#define I2O_DSC_UNKNOWN_ERROR 0x0018 -#define I2O_DSC_UNKNOWN_FUNCTION 0x0019 -#define I2O_DSC_UNSUPPORTED_VERSION 0x001A -#define I2O_DSC_DEVICE_BUSY 0x001B -#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C - -/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed - Status Codes.*/ - -#define I2O_BSA_DSC_SUCCESS 0x0000 -#define I2O_BSA_DSC_MEDIA_ERROR 0x0001 -#define I2O_BSA_DSC_ACCESS_ERROR 0x0002 -#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003 -#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004 -#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005 -#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006 -#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007 -#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008 -#define I2O_BSA_DSC_BUS_FAILURE 0x0009 -#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A -#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B -#define I2O_BSA_DSC_DEVICE_RESET 0x000C -#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D -#define I2O_BSA_DSC_TIMEOUT 0x000E - -/* FailureStatusCodes, Table 3-3 Message Failure Codes */ - -#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81 -#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82 -#define I2O_FSC_TRANSPORT_CONGESTION 0x83 -#define 
I2O_FSC_TRANSPORT_FAILURE 0x84 -#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85 -#define I2O_FSC_TRANSPORT_TIME_OUT 0x86 -#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87 -#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88 -#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89 -#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A -#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B -#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C -#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D -#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E -#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F -#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF - -/* Device Claim Types */ -#define I2O_CLAIM_PRIMARY 0x01000000 -#define I2O_CLAIM_MANAGEMENT 0x02000000 -#define I2O_CLAIM_AUTHORIZED 0x03000000 -#define I2O_CLAIM_SECONDARY 0x04000000 - -/* Message header defines for VersionOffset */ -#define I2OVER15 0x0001 -#define I2OVER20 0x0002 - -/* Default is 1.5 */ -#define I2OVERSION I2OVER15 - -#define SGL_OFFSET_0 I2OVERSION -#define SGL_OFFSET_4 (0x0040 | I2OVERSION) -#define SGL_OFFSET_5 (0x0050 | I2OVERSION) -#define SGL_OFFSET_6 (0x0060 | I2OVERSION) -#define SGL_OFFSET_7 (0x0070 | I2OVERSION) -#define SGL_OFFSET_8 (0x0080 | I2OVERSION) -#define SGL_OFFSET_9 (0x0090 | I2OVERSION) -#define SGL_OFFSET_10 (0x00A0 | I2OVERSION) -#define SGL_OFFSET_11 (0x00B0 | I2OVERSION) -#define SGL_OFFSET_12 (0x00C0 | I2OVERSION) -#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION) - -/* Transaction Reply Lists (TRL) Control Word structure */ -#define TRL_SINGLE_FIXED_LENGTH 0x00 -#define TRL_SINGLE_VARIABLE_LENGTH 0x40 -#define TRL_MULTIPLE_FIXED_LENGTH 0x80 - - /* msg header defines for MsgFlags */ -#define MSG_STATIC 0x0100 -#define MSG_64BIT_CNTXT 0x0200 -#define MSG_MULTI_TRANS 0x1000 -#define MSG_FAIL 0x2000 -#define MSG_FINAL 0x4000 -#define MSG_REPLY 0x8000 - - /* minimum size msg */ -#define THREE_WORD_MSG_SIZE 0x00030000 -#define FOUR_WORD_MSG_SIZE 0x00040000 -#define FIVE_WORD_MSG_SIZE 0x00050000 -#define SIX_WORD_MSG_SIZE 0x00060000 -#define SEVEN_WORD_MSG_SIZE 0x00070000 -#define EIGHT_WORD_MSG_SIZE 0x00080000 -#define NINE_WORD_MSG_SIZE 0x00090000 -#define TEN_WORD_MSG_SIZE 0x000A0000 -#define ELEVEN_WORD_MSG_SIZE 0x000B0000 -#define I2O_MESSAGE_SIZE(x) ((x)<<16) - -/* special TID assignments */ -#define ADAPTER_TID 0 -#define HOST_TID 1 - -/* outbound queue defines */ -#define I2O_MAX_OUTBOUND_MSG_FRAMES 128 -#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ - -#define I2O_POST_WAIT_OK 0 -#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT - -#define I2O_CONTEXT_LIST_MIN_LENGTH 15 -#define I2O_CONTEXT_LIST_USED 0x01 -#define I2O_CONTEXT_LIST_DELETED 0x02 - -/* timeouts */ -#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15 -#define I2O_TIMEOUT_MESSAGE_GET 5 -#define I2O_TIMEOUT_RESET 30 -#define I2O_TIMEOUT_STATUS_GET 5 -#define I2O_TIMEOUT_LCT_GET 360 -#define I2O_TIMEOUT_SCSI_SCB_ABORT 240 - -/* retries */ -#define I2O_HRT_GET_TRIES 3 -#define I2O_LCT_GET_TRIES 3 - -/* defines for max_sectors and max_phys_segments */ -#define I2O_MAX_SECTORS 1024 -#define I2O_MAX_SECTORS_LIMITED 256 -#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS - #endif /* __KERNEL__ */ #endif /* _I2O_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index f04ba20..6c5d4c8 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -12,7 +12,7 @@ #include <linux/config.h> #include <linux/smp.h> -#if !defined(CONFIG_ARCH_S390) +#if !defined(CONFIG_S390) #include <linux/linkage.h> #include <linux/cache.h> @@ -221,6 +221,17 @@ extern void note_interrupt(unsigned int irq, 
irq_desc_t *desc, extern int can_request_irq(unsigned int irq, unsigned long irqflags); extern void init_irq_proc(void); + +#ifdef CONFIG_AUTO_IRQ_AFFINITY +extern int select_smp_affinity(unsigned int irq); +#else +static inline int +select_smp_affinity(unsigned int irq) +{ + return 1; +} +#endif + #endif extern hw_irq_controller no_irq_type; /* needed in every arch ? */ diff --git a/include/linux/jbd.h b/include/linux/jbd.h index dcde7ad..558cb4c 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -498,6 +498,12 @@ struct transaction_s struct journal_head *t_checkpoint_list; /* + * Doubly-linked circular list of all buffers submitted for IO while + * checkpointing. [j_list_lock] + */ + struct journal_head *t_checkpoint_io_list; + + /* * Doubly-linked circular list of temporary buffers currently undergoing * IO in the log [j_list_lock] */ @@ -843,7 +849,7 @@ extern void journal_commit_transaction(journal_t *); /* Checkpoint list management */ int __journal_clean_checkpoint_list(journal_t *journal); -void __journal_remove_checkpoint(struct journal_head *); +int __journal_remove_checkpoint(struct journal_head *); void __journal_insert_checkpoint(struct journal_head *, transaction_t *); /* Buffer IO */ diff --git a/include/linux/key.h b/include/linux/key.h index 53513a3..4d189e5 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -193,14 +193,6 @@ struct key_type { */ int (*instantiate)(struct key *key, const void *data, size_t datalen); - /* duplicate a key of this type (optional) - * - the source key will be locked against change - * - the new description will be attached - * - the quota will have been adjusted automatically from - * source->quotalen - */ - int (*duplicate)(struct key *key, const struct key *source); - /* update a key of this type (optional) * - this method should call key_payload_reserve() to recalculate the * quota consumption diff --git a/include/linux/libata.h b/include/linux/libata.h index 6db2c08..a43c95f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -124,6 +124,8 @@ enum { ATA_FLAG_DEBUGMSG = (1 << 10), ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ + ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ + ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ ATA_QCFLAG_SG = (1 << 3), /* have s/g table? 
*/ ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ @@ -436,6 +438,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr); extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, unsigned int n_ports); extern void ata_pci_remove_one (struct pci_dev *pdev); +extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); +extern int ata_pci_device_resume(struct pci_dev *pdev); #endif /* CONFIG_PCI */ extern int ata_device_add(const struct ata_probe_ent *ent); extern void ata_host_set_remove(struct ata_host_set *host_set); @@ -445,6 +449,10 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn extern int ata_scsi_error(struct Scsi_Host *host); extern int ata_scsi_release(struct Scsi_Host *host); extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); +extern int ata_scsi_device_resume(struct scsi_device *); +extern int ata_scsi_device_suspend(struct scsi_device *); +extern int ata_device_resume(struct ata_port *, struct ata_device *); +extern int ata_device_suspend(struct ata_port *, struct ata_device *); extern int ata_ratelimit(void); /* diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 8b67cf8..ed00b27 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -110,14 +110,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b) #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL) /* - * Hugetlb policy. i386 hugetlb so far works with node numbers - * instead of zone lists, so give it special interfaces for now. - */ -extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr); -extern int mpol_node_valid(int nid, struct vm_area_struct *vma, - unsigned long addr); - -/* * Tree of shared policies for a shared memory region. * Maintain the policies in a pseudo mm that contains vmas. The vmas * carry the policy. 
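Returning to the libata hunk above: the new suspend/resume exports are designed to be wired directly into a low-level driver's scsi_host_template and pci_driver. A sketch under assumed names (the demo_* identifiers are placeholders, and it assumes the SCSI midlayer's matching per-device hooks); only the ata_* symbols come from libata.h:

static struct scsi_host_template demo_sht = {
	/* ... the usual libata SHT fields ... */
	.suspend	= ata_scsi_device_suspend,
	.resume		= ata_scsi_device_resume,
};

static struct pci_driver demo_ata_driver = {
	.name		= "demo_ata",
	.id_table	= demo_pci_tbl,		/* hypothetical ID table */
	.probe		= demo_init_one,	/* hypothetical probe routine */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};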
As a special twist the pseudo mm is indexed in pages, not @@ -156,6 +148,16 @@ extern void numa_default_policy(void); extern void numa_policy_init(void); extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new); extern struct mempolicy default_policy; +extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, + unsigned long addr); + +extern int policy_zone; + +static inline void check_highest_zone(int k) +{ + if (k > policy_zone) + policy_zone = k; +} #else @@ -182,17 +184,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old) return NULL; } -static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a) -{ - return numa_node_id(); -} - -static inline int -mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a) -{ - return 1; -} - struct shared_policy {}; static inline int mpol_set_shared_policy(struct shared_policy *info, @@ -232,6 +223,15 @@ static inline void numa_policy_rebind(const nodemask_t *old, { } +static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, + unsigned long addr) +{ + return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER); +} + +static inline void check_highest_zone(int k) +{ +} #endif /* CONFIG_NUMA */ #endif /* __KERNEL__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index a06a84d..bc01fff 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -634,14 +634,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, int shmem_lock(struct file *file, int lock, struct user_struct *user); #else #define shmem_nopage filemap_nopage -#define shmem_lock(a, b, c) ({0;}) /* always in memory, no need to lock */ -#define shmem_set_policy(a, b) (0) -#define shmem_get_policy(a, b) (NULL) + +static inline int shmem_lock(struct file *file, int lock, + struct user_struct *user) +{ + return 0; +} + +static inline int shmem_set_policy(struct vm_area_struct *vma, + struct mempolicy *new) +{ + return 0; +} + +static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, + unsigned long addr) +{ + return NULL; +} #endif struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); +extern int shmem_mmap(struct file *file, struct vm_area_struct *vma); int shmem_zero_setup(struct vm_area_struct *); +#ifndef CONFIG_MMU +extern unsigned long shmem_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + static inline int can_do_mlock(void) { if (capable(CAP_IPC_LOCK)) @@ -690,14 +714,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, } extern int vmtruncate(struct inode * inode, loff_t offset); +extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot); extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot); -extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access); -static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access) +#ifdef CONFIG_MMU +extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, + unsigned long address, int write_access); + +static inline int handle_mm_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + int 
write_access) { - return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE); + return __handle_mm_fault(mm, vma, address, write_access) & + (~VM_FAULT_WRITE); } +#else +static inline int handle_mm_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + int write_access) +{ + /* should never happen if there's no MMU */ + BUG(); + return VM_FAULT_SIGBUS; +} +#endif extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); @@ -896,6 +937,8 @@ extern unsigned long do_brk(unsigned long, unsigned long); /* filemap.c */ extern unsigned long page_unuse(struct page *); extern void truncate_inode_pages(struct address_space *, loff_t); +extern void truncate_inode_pages_range(struct address_space *, + loff_t lstart, loff_t lend); /* generic vm_area_ops exported for stackable file systems */ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9f22090d..c34f4a2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -46,7 +46,6 @@ struct zone_padding { struct per_cpu_pages { int count; /* number of pages in the list */ - int low; /* low watermark, refill needed */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ struct list_head list; /* the list of pages */ @@ -389,6 +388,11 @@ static inline struct zone *next_zone(struct zone *zone) #define for_each_zone(zone) \ for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) +static inline int populated_zone(struct zone *zone) +{ + return (!!zone->present_pages); +} + static inline int is_highmem_idx(int idx) { return (idx == ZONE_HIGHMEM); @@ -398,6 +402,7 @@ static inline int is_normal_idx(int idx) { return (idx == ZONE_NORMAL); } + /** * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references @@ -414,6 +419,16 @@ static inline int is_normal(struct zone *zone) return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; } +static inline int is_dma32(struct zone *zone) +{ + return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; +} + +static inline int is_dma(struct zone *zone) +{ + return zone == zone->zone_pgdat->node_zones + ZONE_DMA; +} + /* These two functions are used to setup the per zone pages min values */ struct ctl_table; struct file; @@ -435,7 +450,6 @@ extern struct pglist_data contig_page_data; #define NODE_DATA(nid) (&contig_page_data) #define NODE_MEM_MAP(nid) mem_map #define MAX_NODES_SHIFT 1 -#define pfn_to_nid(pfn) (0) #else /* CONFIG_NEED_MULTIPLE_NODES */ @@ -470,6 +484,10 @@ extern struct pglist_data contig_page_data; #define early_pfn_to_nid(nid) (0UL) #endif +#ifdef CONFIG_FLATMEM +#define pfn_to_nid(pfn) (0) +#endif + #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) @@ -564,11 +582,6 @@ static inline int valid_section_nr(unsigned long nr) return valid_section(__nr_to_section(nr)); } -/* - * Given a kernel address, find the home node of the underlying memory. - */ -#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) - static inline struct mem_section *__pfn_to_section(unsigned long pfn) { return __nr_to_section(pfn_to_section_nr(pfn)); @@ -598,13 +611,14 @@ static inline int pfn_valid(unsigned long pfn) * this restriction. 
*/ #ifdef CONFIG_NUMA -#define pfn_to_nid early_pfn_to_nid -#endif - -#define pfn_to_pgdat(pfn) \ +#define pfn_to_nid(pfn) \ ({ \ - NODE_DATA(pfn_to_nid(pfn)); \ + unsigned long __pfn_to_nid_pfn = (pfn); \ + page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ }) +#else +#define pfn_to_nid(pfn) (0) +#endif #define early_pfn_valid(pfn) pfn_valid(pfn) void sparse_init(void); @@ -613,12 +627,6 @@ void sparse_init(void); #define sparse_index_init(_sec, _nid) do {} while (0) #endif /* CONFIG_SPARSEMEM */ -#ifdef CONFIG_NODES_SPAN_OTHER_NODES -#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid)) -#else -#define early_pfn_in_nid(pfn, nid) (1) -#endif - #ifndef early_pfn_valid #define early_pfn_valid(pfn) (1) #endif diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 090e210..f95d51f 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h @@ -37,18 +37,26 @@ enum { /* userspace doesn't need the nbd_device structure */ #ifdef __KERNEL__ +#include <linux/wait.h> + /* values for flags field */ #define NBD_READ_ONLY 0x0001 #define NBD_WRITE_NOCHK 0x0002 +struct request; + struct nbd_device { int flags; int harderror; /* Code of hard error */ struct socket * sock; struct file * file; /* If == NULL, device is not ready, yet */ int magic; + spinlock_t queue_lock; struct list_head queue_head;/* Requests are added here... */ + struct request *active_req; + wait_queue_head_t active_wq; + struct semaphore tx_lock; struct gendisk *disk; int blksize; diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h index 130d4f5..3f4f714 100644 --- a/include/linux/nfsd/xdr.h +++ b/include/linux/nfsd/xdr.h @@ -88,10 +88,12 @@ struct nfsd_readdirargs { struct nfsd_attrstat { struct svc_fh fh; + struct kstat stat; }; struct nfsd_diropres { struct svc_fh fh; + struct kstat stat; }; struct nfsd_readlinkres { @@ -101,6 +103,7 @@ struct nfsd_readlinkres { struct nfsd_readres { struct svc_fh fh; unsigned long count; + struct kstat stat; }; struct nfsd_readdirres { diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h index 3c2a71b..a432274 100644 --- a/include/linux/nfsd/xdr3.h +++ b/include/linux/nfsd/xdr3.h @@ -126,6 +126,7 @@ struct nfsd3_setaclargs { struct nfsd3_attrstat { __u32 status; struct svc_fh fh; + struct kstat stat; }; /* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 343083f..d52999c 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -79,13 +79,23 @@ /* * Global page accounting. One instance per CPU. Only unsigned longs are * allowed. + * + * - Fields can be modified with xxx_page_state and xxx_page_state_zone at + * any time safely (which protects the instance from modification by + * interrupt. + * - The __xxx_page_state variants can be used safely when interrupts are + * disabled. + * - The __xxx_page_state variants can be used if the field is only + * modified from process context, or only modified from interrupt context. + * In this case, the field should be commented here. */ struct page_state { unsigned long nr_dirty; /* Dirty writeable pages */ unsigned long nr_writeback; /* Pages under writeback */ unsigned long nr_unstable; /* NFS unstable pages */ unsigned long nr_page_table_pages;/* Pages used for pagetables */ - unsigned long nr_mapped; /* mapped into pagetables */ + unsigned long nr_mapped; /* mapped into pagetables. 
+ * only modified from process context */ unsigned long nr_slab; /* In slab */ #define GET_PAGE_STATE_LAST nr_slab @@ -97,32 +107,40 @@ struct page_state { unsigned long pgpgout; /* Disk writes */ unsigned long pswpin; /* swap reads */ unsigned long pswpout; /* swap writes */ - unsigned long pgalloc_high; /* page allocations */ + unsigned long pgalloc_high; /* page allocations */ unsigned long pgalloc_normal; + unsigned long pgalloc_dma32; unsigned long pgalloc_dma; + unsigned long pgfree; /* page freeings */ unsigned long pgactivate; /* pages moved inactive->active */ unsigned long pgdeactivate; /* pages moved active->inactive */ unsigned long pgfault; /* faults (major+minor) */ unsigned long pgmajfault; /* faults (major only) */ + unsigned long pgrefill_high; /* inspected in refill_inactive_zone */ unsigned long pgrefill_normal; + unsigned long pgrefill_dma32; unsigned long pgrefill_dma; unsigned long pgsteal_high; /* total highmem pages reclaimed */ unsigned long pgsteal_normal; + unsigned long pgsteal_dma32; unsigned long pgsteal_dma; + unsigned long pgscan_kswapd_high;/* total highmem pages scanned */ unsigned long pgscan_kswapd_normal; - + unsigned long pgscan_kswapd_dma32; unsigned long pgscan_kswapd_dma; + unsigned long pgscan_direct_high;/* total highmem pages scanned */ unsigned long pgscan_direct_normal; + unsigned long pgscan_direct_dma32; unsigned long pgscan_direct_dma; - unsigned long pginodesteal; /* pages reclaimed via inode freeing */ + unsigned long pginodesteal; /* pages reclaimed via inode freeing */ unsigned long slabs_scanned; /* slab objects scanned */ unsigned long kswapd_steal; /* pages reclaimed by kswapd */ unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */ @@ -136,31 +154,54 @@ struct page_state { extern void get_page_state(struct page_state *ret); extern void get_page_state_node(struct page_state *ret, int node); extern void get_full_page_state(struct page_state *ret); -extern unsigned long __read_page_state(unsigned long offset); -extern void __mod_page_state(unsigned long offset, unsigned long delta); +extern unsigned long read_page_state_offset(unsigned long offset); +extern void mod_page_state_offset(unsigned long offset, unsigned long delta); +extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define read_page_state(member) \ - __read_page_state(offsetof(struct page_state, member)) + read_page_state_offset(offsetof(struct page_state, member)) #define mod_page_state(member, delta) \ - __mod_page_state(offsetof(struct page_state, member), (delta)) - -#define inc_page_state(member) mod_page_state(member, 1UL) -#define dec_page_state(member) mod_page_state(member, 0UL - 1) -#define add_page_state(member,delta) mod_page_state(member, (delta)) -#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta)) - -#define mod_page_state_zone(zone, member, delta) \ - do { \ - unsigned offset; \ - if (is_highmem(zone)) \ - offset = offsetof(struct page_state, member##_high); \ - else if (is_normal(zone)) \ - offset = offsetof(struct page_state, member##_normal); \ - else \ - offset = offsetof(struct page_state, member##_dma); \ - __mod_page_state(offset, (delta)); \ - } while (0) + mod_page_state_offset(offsetof(struct page_state, member), (delta)) + +#define __mod_page_state(member, delta) \ + __mod_page_state_offset(offsetof(struct page_state, member), (delta)) + +#define inc_page_state(member) mod_page_state(member, 1UL) +#define dec_page_state(member) mod_page_state(member, 0UL - 1) +#define 
add_page_state(member,delta) mod_page_state(member, (delta)) +#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta)) + +#define __inc_page_state(member) __mod_page_state(member, 1UL) +#define __dec_page_state(member) __mod_page_state(member, 0UL - 1) +#define __add_page_state(member,delta) __mod_page_state(member, (delta)) +#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta)) + +#define page_state(member) (*__page_state(offsetof(struct page_state, member))) + +#define state_zone_offset(zone, member) \ +({ \ + unsigned offset; \ + if (is_highmem(zone)) \ + offset = offsetof(struct page_state, member##_high); \ + else if (is_normal(zone)) \ + offset = offsetof(struct page_state, member##_normal); \ + else if (is_dma32(zone)) \ + offset = offsetof(struct page_state, member##_dma32); \ + else \ + offset = offsetof(struct page_state, member##_dma); \ + offset; \ +}) + +#define __mod_page_state_zone(zone, member, delta) \ + do { \ + __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ + } while (0) + +#define mod_page_state_zone(zone, member, delta) \ + do { \ + mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ + } while (0) /* * Manipulation of page state flags diff --git a/include/linux/parport.h b/include/linux/parport.h index d2a4d9e..f7ff0b0 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -242,7 +242,6 @@ enum ieee1284_phase { IEEE1284_PH_FWD_IDLE, IEEE1284_PH_TERMINATE, IEEE1284_PH_NEGOTIATION, - IEEE1284_PH_HBUSY_DNA, IEEE1284_PH_REV_IDLE, IEEE1284_PH_HBUSY_DAVAIL, IEEE1284_PH_REV_DATA, diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index c6f7624..1cc0f6b 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h @@ -79,13 +79,13 @@ static __inline__ unsigned char parport_pc_read_data(struct parport *p) } #ifdef DEBUG_PARPORT -extern __inline__ void dump_parport_state (char *str, struct parport *p) +static inline void dump_parport_state (char *str, struct parport *p) { /* here's hoping that reading these ports won't side-effect anything underneath */ unsigned char ecr = inb (ECONTROL (p)); unsigned char dcr = inb (CONTROL (p)); unsigned char dsr = inb (STATUS (p)); - static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; + static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; const struct parport_pc_private *priv = p->physport->private_data; int i; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4f01710..24db724 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -394,6 +394,13 @@ #define PCI_DEVICE_ID_NS_87410 0xd001 #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d +#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028 +#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b +#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d +#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e +#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f +#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030 + #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 @@ -496,6 +503,9 @@ #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A +#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 +#define PCI_DEVICE_ID_AMD_LX_AES 0x2082 + #define PCI_VENDOR_ID_TRIDENT 0x1023 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index 13e7c4b..b6e0bca 100644 --- 
a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -71,8 +71,8 @@ */ #define MD_PATCHLEVEL_VERSION 3 -extern int register_md_personality (int p_num, mdk_personality_t *p); -extern int unregister_md_personality (int p_num); +extern int register_md_personality (struct mdk_personality *p); +extern int unregister_md_personality (struct mdk_personality *p); extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); extern void md_unregister_thread (mdk_thread_t *thread); diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 46629a2..617b950 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -18,62 +18,19 @@ /* and dm-bio-list.h is not under include/linux because.... ??? */ #include "../../../drivers/md/dm-bio-list.h" -#define MD_RESERVED 0UL -#define LINEAR 1UL -#define RAID0 2UL -#define RAID1 3UL -#define RAID5 4UL -#define TRANSLUCENT 5UL -#define HSM 6UL -#define MULTIPATH 7UL -#define RAID6 8UL -#define RAID10 9UL -#define FAULTY 10UL -#define MAX_PERSONALITY 11UL - #define LEVEL_MULTIPATH (-4) #define LEVEL_LINEAR (-1) #define LEVEL_FAULTY (-5) +/* we need a value for 'no level specified' and 0 + * means 'raid0', so we need something else. This is + * for internal use only + */ +#define LEVEL_NONE (-1000000) + #define MaxSector (~(sector_t)0) #define MD_THREAD_NAME_MAX 14 -static inline int pers_to_level (int pers) -{ - switch (pers) { - case FAULTY: return LEVEL_FAULTY; - case MULTIPATH: return LEVEL_MULTIPATH; - case HSM: return -3; - case TRANSLUCENT: return -2; - case LINEAR: return LEVEL_LINEAR; - case RAID0: return 0; - case RAID1: return 1; - case RAID5: return 5; - case RAID6: return 6; - case RAID10: return 10; - } - BUG(); - return MD_RESERVED; -} - -static inline int level_to_pers (int level) -{ - switch (level) { - case LEVEL_FAULTY: return FAULTY; - case LEVEL_MULTIPATH: return MULTIPATH; - case -3: return HSM; - case -2: return TRANSLUCENT; - case LEVEL_LINEAR: return LINEAR; - case 0: return RAID0; - case 1: return RAID1; - case 4: - case 5: return RAID5; - case 6: return RAID6; - case 10: return RAID10; - } - return MD_RESERVED; -} - typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; @@ -138,14 +95,16 @@ struct mdk_rdev_s atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ + atomic_t corrected_errors; /* number of corrected read errors, + * for reporting to userspace and storing + * in superblock. 
+ */ }; -typedef struct mdk_personality_s mdk_personality_t; - struct mddev_s { void *private; - mdk_personality_t *pers; + struct mdk_personality *pers; dev_t unit; int md_minor; struct list_head disks; @@ -164,6 +123,7 @@ struct mddev_s int chunk_size; time_t ctime, utime; int level, layout; + char clevel[16]; int raid_disks; int max_disks; sector_t size; /* used size of component devices */ @@ -183,6 +143,11 @@ struct mddev_s sector_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ + /* if zero, use the system-wide default */ + int sync_speed_min; + int sync_speed_max; + + int ok_start_degraded; /* recovery/resync flags * NEEDED: we might need to start a resync/recover * RUNNING: a thread is running, or about to be started @@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } -struct mdk_personality_s +struct mdk_personality { char *name; + int level; + struct list_head list; struct module *owner; int (*make_request)(request_queue_t *q, struct bio *bio); int (*run)(mddev_t *mddev); @@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev) return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; } -extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr); - /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. @@ -366,5 +331,10 @@ do { \ __wait_event_lock_irq(wq, condition, lock, cmd); \ } while (0) +static inline void safe_put_page(struct page *p) +{ + if (p) put_page(p); +} + #endif diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index 292b98f..9d5494a 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h @@ -45,6 +45,8 @@ struct r1_private_data_s { spinlock_t resync_lock; int nr_pending; + int nr_waiting; + int nr_queued; int barrier; sector_t next_resync; int fullsync; /* set to 1 if a full sync is needed, @@ -52,11 +54,12 @@ struct r1_private_data_s { * Cleared when a sync completes. */ - wait_queue_head_t wait_idle; - wait_queue_head_t wait_resume; + wait_queue_head_t wait_barrier; struct pool_info *poolinfo; + struct page *tmppage; + mempool_t *r1bio_pool; mempool_t *r1buf_pool; }; @@ -106,6 +109,13 @@ struct r1bio_s { /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio*)1) + /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h index 6070878..b110329 100644 --- a/include/linux/raid/raid10.h +++ b/include/linux/raid/raid10.h @@ -35,18 +35,26 @@ struct r10_private_data_s { sector_t chunk_mask; struct list_head retry_list; - /* for use when syncing mirrors: */ + /* queue pending writes and submit them on unplug */ + struct bio_list pending_bio_list; + spinlock_t resync_lock; int nr_pending; + int nr_waiting; + int nr_queued; int barrier; sector_t next_resync; + int fullsync; /* set to 1 if a full sync is needed, + * (fresh device added). + * Cleared when a sync completes. 
+ */ - wait_queue_head_t wait_idle; - wait_queue_head_t wait_resume; + wait_queue_head_t wait_barrier; mempool_t *r10bio_pool; mempool_t *r10buf_pool; + struct page *tmppage; }; typedef struct r10_private_data_s conf_t; @@ -96,8 +104,16 @@ struct r10bio_s { } devs[0]; }; +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio*)1) + /* bits for r10bio.state */ #define R10BIO_Uptodate 0 #define R10BIO_IsSync 1 #define R10BIO_IsRecover 2 +#define R10BIO_Degraded 3 #endif diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index f025ba6..394da82 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -126,7 +126,7 @@ */ struct stripe_head { - struct stripe_head *hash_next, **hash_pprev; /* hash pointers */ + struct hlist_node hash; struct list_head lru; /* inactive_list or handle_list */ struct raid5_private_data *raid_conf; sector_t sector; /* sector of this row */ @@ -152,7 +152,6 @@ struct stripe_head { #define R5_Insync 3 /* rdev && rdev->in_sync at start */ #define R5_Wantread 4 /* want to schedule a read */ #define R5_Wantwrite 5 -#define R5_Syncio 6 /* this io need to be accounted as resync io */ #define R5_Overlap 7 /* There is a pending overlapping request on this block */ #define R5_ReadError 8 /* seen a read error here recently */ #define R5_ReWrite 9 /* have tried to over-write the readerror */ @@ -205,7 +204,7 @@ struct disk_info { }; struct raid5_private_data { - struct stripe_head **stripe_hashtbl; + struct hlist_head *stripe_hashtbl; mddev_t *mddev; struct disk_info *spare; int chunk_size, level, algorithm; @@ -228,6 +227,8 @@ struct raid5_private_data { * Cleared when a sync completes. 
*/ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + /* * Free stripes pool */ diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index e0a4faa..953b6df 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -5,6 +5,16 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev); struct super_block *ramfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); +#ifndef CONFIG_MMU +extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); + +extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); +#endif + extern struct file_operations ramfs_file_operations; extern struct vm_operations_struct generic_file_vm_ops; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 33261f1..9d6fbee 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *); * rmap interfaces called when adding or removing pte of page */ void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_file_rmap(struct page *); void page_remove_rmap(struct page *); diff --git a/include/linux/sched.h b/include/linux/sched.h index b0ad6f3..7da3361 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); * The mm counters are not protected by its page_table_lock, * so must be incremented atomically. */ -#ifdef ATOMIC64_INIT -#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value) -#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member)) -#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member) -#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member) -#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member) -typedef atomic64_t mm_counter_t; -#else /* !ATOMIC64_INIT */ -/* - * The counters wrap back to 0 at 2^32 * PAGE_SIZE, - * that is, at 16TB if using 4kB page size. 
- */ -#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value) -#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member)) -#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member) -#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member) -#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member) -typedef atomic_t mm_counter_t; -#endif /* !ATOMIC64_INIT */ +#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value) +#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member)) +#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) +#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) +#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) +typedef atomic_long_t mm_counter_t; #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ /* diff --git a/include/linux/suspend.h b/include/linux/suspend.h index a61c04f..5dc94e7 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -14,11 +14,7 @@ typedef struct pbe { unsigned long address; /* address of the copy */ unsigned long orig_address; /* original address of page */ - swp_entry_t swap_address; - - struct pbe *next; /* also used as scratch space at - * end of page (see link, diskpage) - */ + struct pbe *next; } suspend_pagedir_t; #define for_each_pbe(pbe, pblist) \ @@ -77,6 +73,6 @@ unsigned long get_safe_page(gfp_t gfp_mask); * XXX: We try to keep some more pages free so that I/O operations succeed * without paging. Might this be more? */ -#define PAGES_FOR_IO 512 +#define PAGES_FOR_IO 1024 #endif /* _LINUX_SWSUSP_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 508668f..556617b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -172,7 +172,6 @@ extern void swap_setup(void); /* linux/mm/vmscan.c */ extern int try_to_free_pages(struct zone **, gfp_t); -extern int zone_reclaim(struct zone *, gfp_t, unsigned int); extern int shrink_all_memory(int); extern int vm_swappiness; @@ -210,6 +209,7 @@ extern unsigned int nr_swapfiles; extern struct swap_info_struct swap_info[]; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); +extern swp_entry_t get_swap_page_of_type(int type); extern int swap_duplicate(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 25f637b..230bc55 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -296,6 +296,12 @@ struct scsi_host_template { int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int); /* + * suspend support + */ + int (*resume)(struct scsi_device *); + int (*suspend)(struct scsi_device *); + + /* * Name of proc directory */ char *proc_name; diff --git a/init/Kconfig b/init/Kconfig index ce737e0..ba42f37 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -105,7 +105,6 @@ config SWAP config SYSVIPC bool "System V IPC" - depends on MMU ---help--- Inter Process Communication is a suite of library functions and system calls which let processes (running programs) synchronize and @@ -190,7 +189,7 @@ config AUDIT config AUDITSYSCALL bool "Enable system-call auditing support" - depends on AUDIT && (X86 || PPC || PPC64 || ARCH_S390 || IA64 || UML || SPARC64) + depends on AUDIT && (X86 || PPC || PPC64 || S390 || IA64 || UML || SPARC64) default y if SECURITY_SELINUX help Enable low-overhead system-call auditing 
infrastructure that diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c index 3fbc355..f6f3680 100644 --- a/init/do_mounts_md.c +++ b/init/do_mounts_md.c @@ -17,7 +17,7 @@ static int __initdata raid_noautodetect, raid_autopart; static struct { int minor; int partitioned; - int pers; + int level; int chunk; char *device_names; } md_setup_args[MAX_MD_DEVS] __initdata; @@ -47,7 +47,7 @@ extern int mdp_major; */ static int __init md_setup(char *str) { - int minor, level, factor, fault, pers, partitioned = 0; + int minor, level, factor, fault, partitioned = 0; char *pername = ""; char *str1; int ent; @@ -78,7 +78,7 @@ static int __init md_setup(char *str) } if (ent >= md_setup_ents) md_setup_ents++; - switch (get_option(&str, &level)) { /* RAID Personality */ + switch (get_option(&str, &level)) { /* RAID level */ case 2: /* could be 0 or -1.. */ if (level == 0 || level == LEVEL_LINEAR) { if (get_option(&str, &factor) != 2 || /* Chunk Size */ @@ -86,16 +86,12 @@ static int __init md_setup(char *str) printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); return 0; } - md_setup_args[ent].pers = level; + md_setup_args[ent].level = level; md_setup_args[ent].chunk = 1 << (factor+12); - if (level == LEVEL_LINEAR) { - pers = LINEAR; + if (level == LEVEL_LINEAR) pername = "linear"; - } else { - pers = RAID0; + else pername = "raid0"; - } - md_setup_args[ent].pers = pers; break; } /* FALL THROUGH */ @@ -103,7 +99,7 @@ static int __init md_setup(char *str) str = str1; /* FALL THROUGH */ case 0: - md_setup_args[ent].pers = 0; + md_setup_args[ent].level = LEVEL_NONE; pername="super-block"; } @@ -190,10 +186,10 @@ static void __init md_setup_drive(void) continue; } - if (md_setup_args[ent].pers) { + if (md_setup_args[ent].level != LEVEL_NONE) { /* non-persistent */ mdu_array_info_t ainfo; - ainfo.level = pers_to_level(md_setup_args[ent].pers); + ainfo.level = md_setup_args[ent].level; ainfo.size = 0; ainfo.nr_disks =0; ainfo.raid_disks =0; diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index c10b08a..c2683fc 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -145,7 +145,7 @@ int __init rd_load_image(char *from) int nblocks, i, disk; char *buf = NULL; unsigned short rotate = 0; -#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES) +#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) char rotator[4] = { '|' , '/' , '-' , '\\' }; #endif @@ -237,7 +237,7 @@ int __init rd_load_image(char *from) } sys_read(in_fd, buf, BLOCK_SIZE); sys_write(out_fd, buf, BLOCK_SIZE); -#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES) +#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) if (!(i % 16)) { printk("%c\b", rotator[rotate & 0x3]); rotate++; diff --git a/init/main.c b/init/main.c index 54aaf56..2ed3638 100644 --- a/init/main.c +++ b/init/main.c @@ -52,6 +52,7 @@ #include <asm/bugs.h> #include <asm/setup.h> #include <asm/sections.h> +#include <asm/cacheflush.h> /* * This is one of the first .c files built. 
Error out early @@ -99,6 +100,9 @@ extern void acpi_early_init(void); #else static inline void acpi_early_init(void) { } #endif +#ifndef CONFIG_DEBUG_RODATA +static inline void mark_rodata_ro(void) { } +#endif #ifdef CONFIG_TC extern void tc_init(void); @@ -708,6 +712,7 @@ static int init(void * unused) */ free_initmem(); unlock_kernel(); + mark_rodata_ro(); system_state = SYSTEM_RUNNING; numa_default_policy(); @@ -157,14 +157,22 @@ static void shm_close (struct vm_area_struct *shmd) static int shm_mmap(struct file * file, struct vm_area_struct * vma) { - file_accessed(file); - vma->vm_ops = &shm_vm_ops; - shm_inc(file->f_dentry->d_inode->i_ino); - return 0; + int ret; + + ret = shmem_mmap(file, vma); + if (ret == 0) { + vma->vm_ops = &shm_vm_ops; + shm_inc(file->f_dentry->d_inode->i_ino); + } + + return ret; } static struct file_operations shm_file_operations = { - .mmap = shm_mmap + .mmap = shm_mmap, +#ifndef CONFIG_MMU + .get_unmapped_area = shmem_get_unmapped_area, +#endif }; static struct vm_operations_struct shm_vm_ops = { diff --git a/kernel/acct.c b/kernel/acct.c index 6312d6b..38d57fa 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -427,6 +427,7 @@ static void do_acct_process(long exitcode, struct file *file) u64 elapsed; u64 run_time; struct timespec uptime; + unsigned long jiffies; /* * First check to see if there is enough free_space to continue @@ -467,12 +468,12 @@ static void do_acct_process(long exitcode, struct file *file) #endif do_div(elapsed, AHZ); ac.ac_btime = xtime.tv_sec - elapsed; - ac.ac_utime = encode_comp_t(jiffies_to_AHZ( - current->signal->utime + - current->group_leader->utime)); - ac.ac_stime = encode_comp_t(jiffies_to_AHZ( - current->signal->stime + - current->group_leader->stime)); + jiffies = cputime_to_jiffies(cputime_add(current->group_leader->utime, + current->signal->utime)); + ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies)); + jiffies = cputime_to_jiffies(cputime_add(current->group_leader->stime, + current->signal->stime)); + ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies)); /* we really need to bite the bullet and change layout */ ac.ac_uid = current->uid; ac.ac_gid = current->gid; @@ -580,7 +581,8 @@ void acct_process(long exitcode) void acct_update_integrals(struct task_struct *tsk) { if (likely(tsk->mm)) { - long delta = tsk->stime - tsk->acct_stimexpd; + long delta = + cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd; if (delta == 0) return; diff --git a/kernel/futex.c b/kernel/futex.c index 5e71a6b..5efa2f9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -356,6 +356,13 @@ retry: if (bh1 != bh2) spin_unlock(&bh2->lock); +#ifndef CONFIG_MMU + /* we don't get EFAULT from MMU faults if we don't have an MMU, + * but we might get them from range checking */ + ret = op_ret; + goto out; +#endif + if (unlikely(op_ret != -EFAULT)) { ret = op_ret; goto out; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 81c49a4..97d5559 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -366,6 +366,8 @@ int request_irq(unsigned int irq, action->next = NULL; action->dev_id = dev_id; + select_smp_affinity(irq); + retval = setup_irq(irq, action); if (retval) kfree(action); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index f26e534..8a64a48 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -68,7 +68,9 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer, */ cpus_and(tmp, new_value, cpu_online_map); if (cpus_empty(tmp)) - return -EINVAL; + /* Special case for empty set - allow the 
architecture + code to set default SMP affinity. */ + return select_smp_affinity(irq) ? -EINVAL : full_count; proc_set_irq_affinity(irq, new_value); diff --git a/kernel/module.c b/kernel/module.c index 2ea929d..4b06bba 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1854,8 +1854,7 @@ static struct module *load_module(void __user *umod, kfree(args); free_hdr: vfree(hdr); - if (err < 0) return ERR_PTR(err); - else return ptr; + return ERR_PTR(err); truncated: printk(KERN_ERR "Module len %lu truncated\n", len); diff --git a/kernel/panic.c b/kernel/panic.c index aabc5f8..c5c4ab2 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -60,7 +60,7 @@ NORET_TYPE void panic(const char * fmt, ...) long i; static char buf[1024]; va_list args; -#if defined(CONFIG_ARCH_S390) +#if defined(CONFIG_S390) unsigned long caller = (unsigned long) __builtin_return_address(0); #endif @@ -125,7 +125,7 @@ NORET_TYPE void panic(const char * fmt, ...) printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n"); } #endif -#if defined(CONFIG_ARCH_S390) +#if defined(CONFIG_S390) disabled_wait(caller); #endif local_irq_enable(); diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 027322a..e24446f 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -24,10 +24,11 @@ extern suspend_disk_method_t pm_disk_mode; +extern int swsusp_shrink_memory(void); extern int swsusp_suspend(void); -extern int swsusp_write(void); +extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages); extern int swsusp_check(void); -extern int swsusp_read(void); +extern int swsusp_read(struct pbe **pblist_ptr); extern void swsusp_close(void); extern int swsusp_resume(void); @@ -73,31 +74,6 @@ static void power_down(suspend_disk_method_t mode) static int in_suspend __nosavedata = 0; -/** - * free_some_memory - Try to free as much memory as possible - * - * ... but do not OOM-kill anyone - * - * Notice: all userland should be stopped at this point, or - * livelock is possible. - */ - -static void free_some_memory(void) -{ - unsigned int i = 0; - unsigned int tmp; - unsigned long pages = 0; - char *p = "-\\|/"; - - printk("Freeing memory... "); - while ((tmp = shrink_all_memory(10000))) { - pages += tmp; - printk("\b%c", p[i++ % 4]); - } - printk("\bdone (%li pages freed)\n", pages); -} - - static inline void platform_finish(void) { if (pm_disk_mode == PM_DISK_PLATFORM) { @@ -127,8 +103,8 @@ static int prepare_processes(void) } /* Free memory before shutting down devices. */ - free_some_memory(); - return 0; + if (!(error = swsusp_shrink_memory())) + return 0; thaw: thaw_processes(); enable_nonboot_cpus(); @@ -176,7 +152,7 @@ int pm_suspend_disk(void) if (in_suspend) { device_resume(); pr_debug("PM: writing image.\n"); - error = swsusp_write(); + error = swsusp_write(pagedir_nosave, nr_copy_pages); if (!error) power_down(pm_disk_mode); else { @@ -247,7 +223,7 @@ static int software_resume(void) pr_debug("PM: Reading swsusp image.\n"); - if ((error = swsusp_read())) { + if ((error = swsusp_read(&pagedir_nosave))) { swsusp_free(); goto Thaw; } @@ -363,37 +339,55 @@ static ssize_t resume_show(struct subsystem * subsys, char *buf) MINOR(swsusp_resume_device)); } -static ssize_t resume_store(struct subsystem * subsys, const char * buf, size_t n) +static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n) { - int len; - char *p; unsigned int maj, min; - int error = -EINVAL; dev_t res; + int ret = -EINVAL; - p = memchr(buf, '\n', n); - len = p ? 
p - buf : n; + if (sscanf(buf, "%u:%u", &maj, &min) != 2) + goto out; - if (sscanf(buf, "%u:%u", &maj, &min) == 2) { - res = MKDEV(maj,min); - if (maj == MAJOR(res) && min == MINOR(res)) { - down(&pm_sem); - swsusp_resume_device = res; - up(&pm_sem); - printk("Attempting manual resume\n"); - noresume = 0; - software_resume(); - } - } + res = MKDEV(maj,min); + if (maj != MAJOR(res) || min != MINOR(res)) + goto out; - return error >= 0 ? n : error; + down(&pm_sem); + swsusp_resume_device = res; + up(&pm_sem); + printk("Attempting manual resume\n"); + noresume = 0; + software_resume(); + ret = n; +out: + return ret; } power_attr(resume); +static ssize_t image_size_show(struct subsystem * subsys, char *buf) +{ + return sprintf(buf, "%u\n", image_size); +} + +static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n) +{ + unsigned int size; + + if (sscanf(buf, "%u", &size) == 1) { + image_size = size; + return n; + } + + return -EINVAL; +} + +power_attr(image_size); + static struct attribute * g[] = { &disk_attr.attr, &resume_attr.attr, + &image_size_attr.attr, NULL, }; diff --git a/kernel/power/power.h b/kernel/power/power.h index 6c042b5..7e8492f 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -9,19 +9,13 @@ #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) #endif -#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \ - - 4 - 3*sizeof(unsigned long) - sizeof(int) \ - - sizeof(void *)) / sizeof(swp_entry_t)) - struct swsusp_info { struct new_utsname uts; u32 version_code; unsigned long num_physpages; int cpus; unsigned long image_pages; - unsigned long pagedir_pages; - suspend_pagedir_t * suspend_pagedir; - swp_entry_t pagedir[MAX_PBES]; + unsigned long pages; } __attribute__((aligned(PAGE_SIZE))); @@ -48,25 +42,27 @@ static struct subsys_attribute _name##_attr = { \ extern struct subsystem power_subsys; -extern int freeze_processes(void); -extern void thaw_processes(void); - extern int pm_prepare_console(void); extern void pm_restore_console(void); - /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; extern unsigned int nr_copy_pages; -extern suspend_pagedir_t *pagedir_nosave; -extern suspend_pagedir_t *pagedir_save; +extern struct pbe *pagedir_nosave; + +/* Preferred image size in MB (default 500) */ +extern unsigned int image_size; extern asmlinkage int swsusp_arch_suspend(void); extern asmlinkage int swsusp_arch_resume(void); +extern unsigned int count_data_pages(void); extern void free_pagedir(struct pbe *pblist); +extern void release_eaten_pages(void); extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed); -extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages); extern void swsusp_free(void); extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed); +extern unsigned int snapshot_nr_pages(void); +extern struct pbe *snapshot_pblist(void); +extern void snapshot_pblist_set(struct pbe *pblist); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 4a6dbce..41f6636 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -33,7 +33,35 @@ #include "power.h" +struct pbe *pagedir_nosave; +unsigned int nr_copy_pages; + #ifdef CONFIG_HIGHMEM +unsigned int count_highmem_pages(void) +{ + struct zone *zone; + unsigned long zone_pfn; + unsigned int n = 0; + + for_each_zone (zone) + if (is_highmem(zone)) { + mark_free_pages(zone); + for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) { + struct page *page; + unsigned long 
pfn = zone_pfn + zone->zone_start_pfn; + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + if (PageReserved(page)) + continue; + if (PageNosaveFree(page)) + continue; + n++; + } + } + return n; +} + struct highmem_page { char *data; struct page *page; @@ -149,17 +177,15 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn) BUG_ON(PageReserved(page) && PageNosave(page)); if (PageNosave(page)) return 0; - if (PageReserved(page) && pfn_is_nosave(pfn)) { - pr_debug("[nosave pfn 0x%lx]", pfn); + if (PageReserved(page) && pfn_is_nosave(pfn)) return 0; - } if (PageNosaveFree(page)) return 0; return 1; } -static unsigned count_data_pages(void) +unsigned int count_data_pages(void) { struct zone *zone; unsigned long zone_pfn; @@ -244,7 +270,7 @@ static inline void fill_pb_page(struct pbe *pbpage) * of memory pages allocated with alloc_pagedir() */ -void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) +static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) { struct pbe *pbpage, *p; unsigned int num = PBES_PER_PAGE; @@ -261,7 +287,35 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) p->next = p + 1; p->next = NULL; } - pr_debug("create_pbe_list(): initialized %d PBEs\n", num); +} + +/** + * On resume it is necessary to trace and eventually free the unsafe + * pages that have been allocated, because they are needed for I/O + * (on x86-64 we likely will "eat" these pages once again while + * creating the temporary page translation tables) + */ + +struct eaten_page { + struct eaten_page *next; + char padding[PAGE_SIZE - sizeof(void *)]; +}; + +static struct eaten_page *eaten_pages = NULL; + +void release_eaten_pages(void) +{ + struct eaten_page *p, *q; + + p = eaten_pages; + while (p) { + q = p->next; + /* We don't want swsusp_free() to free this page again */ + ClearPageNosave(virt_to_page(p)); + free_page((unsigned long)p); + p = q; + } + eaten_pages = NULL; } /** @@ -282,9 +336,12 @@ static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed) if (safe_needed) do { res = (void *)get_zeroed_page(gfp_mask); - if (res && PageNosaveFree(virt_to_page(res))) + if (res && PageNosaveFree(virt_to_page(res))) { /* This is for swsusp_free() */ SetPageNosave(virt_to_page(res)); + ((struct eaten_page *)res)->next = eaten_pages; + eaten_pages = res; + } } while (res && PageNosaveFree(virt_to_page(res))); else res = (void *)get_zeroed_page(gfp_mask); @@ -332,7 +389,8 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed if (!pbe) { /* get_zeroed_page() failed */ free_pagedir(pblist); pblist = NULL; - } + } else + create_pbe_list(pblist, nr_pages); return pblist; } @@ -370,8 +428,14 @@ void swsusp_free(void) static int enough_free_mem(unsigned int nr_pages) { - pr_debug("swsusp: available memory: %u pages\n", nr_free_pages()); - return nr_free_pages() > (nr_pages + PAGES_FOR_IO + + struct zone *zone; + unsigned int n = 0; + + for_each_zone (zone) + if (!is_highmem(zone)) + n += zone->free_pages; + pr_debug("swsusp: available memory: %u pages\n", n); + return n > (nr_pages + PAGES_FOR_IO + (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); } @@ -395,7 +459,6 @@ static struct pbe *swsusp_alloc(unsigned int nr_pages) printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); return NULL; } - create_pbe_list(pblist, nr_pages); if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) { printk(KERN_ERR "suspend: Allocating image pages failed.\n"); @@ -421,10 +484,6 @@ asmlinkage int swsusp_save(void) (nr_pages + 
PBES_PER_PAGE - 1) / PBES_PER_PAGE, PAGES_FOR_IO, nr_free_pages()); - /* This is needed because of the fixed size of swsusp_info */ - if (MAX_PBES < (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE) - return -ENOSPC; - if (!enough_free_mem(nr_pages)) { printk(KERN_ERR "swsusp: Not enough free memory\n"); return -ENOMEM; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index c05f46e..55a18d2 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -30,8 +30,8 @@ * Alex Badea <vampire@go.ro>: * Fixed runaway init * - * Andreas Steinmetz <ast@domdv.de>: - * Added encrypted suspend option + * Rafael J. Wysocki <rjw@sisk.pl> + * Added the swap map data structure and reworked the handling of swap * * More state savers are welcome. Especially for the scsi layer... * @@ -67,44 +67,33 @@ #include <asm/tlbflush.h> #include <asm/io.h> -#include <linux/random.h> -#include <linux/crypto.h> -#include <asm/scatterlist.h> - #include "power.h" +/* + * Preferred image size in MB (tunable via /sys/power/image_size). + * When it is set to N, swsusp will do its best to ensure the image + * size will not exceed N MB, but if that is impossible, it will + * try to create the smallest image possible. + */ +unsigned int image_size = 500; + #ifdef CONFIG_HIGHMEM +unsigned int count_highmem_pages(void); int save_highmem(void); int restore_highmem(void); #else static int save_highmem(void) { return 0; } static int restore_highmem(void) { return 0; } +static unsigned int count_highmem_pages(void) { return 0; } #endif -#define CIPHER "aes" -#define MAXKEY 32 -#define MAXIV 32 - extern char resume_file[]; -/* Local variables that should not be affected by save */ -unsigned int nr_copy_pages __nosavedata = 0; - -/* Suspend pagedir is allocated before final copy, therefore it - must be freed after resume - - Warning: this is even more evil than it seems. Pagedirs this file - talks about are completely different from page directories used by - MMU hardware. - */ -suspend_pagedir_t *pagedir_nosave __nosavedata = NULL; - #define SWSUSP_SIG "S1SUSPEND" static struct swsusp_header { - char reserved[PAGE_SIZE - 20 - MAXKEY - MAXIV - sizeof(swp_entry_t)]; - u8 key_iv[MAXKEY+MAXIV]; - swp_entry_t swsusp_info; + char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; + swp_entry_t image; char orig_sig[10]; char sig[10]; } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; @@ -115,140 +104,9 @@ static struct swsusp_info swsusp_info; * Saving part... */ -/* We memorize in swapfile_used what swap devices are used for suspension */ -#define SWAPFILE_UNUSED 0 -#define SWAPFILE_SUSPEND 1 /* This is the suspending device */ -#define SWAPFILE_IGNORED 2 /* Those are other swap devices ignored for suspension */ - -static unsigned short swapfile_used[MAX_SWAPFILES]; -static unsigned short root_swap; - -static int write_page(unsigned long addr, swp_entry_t *loc); -static int bio_read_page(pgoff_t page_off, void *page); - -static u8 key_iv[MAXKEY+MAXIV]; - -#ifdef CONFIG_SWSUSP_ENCRYPT - -static int crypto_init(int mode, void **mem) -{ - int error = 0; - int len; - char *modemsg; - struct crypto_tfm *tfm; - - modemsg = mode ? 
"suspend not possible" : "resume not possible"; - - tfm = crypto_alloc_tfm(CIPHER, CRYPTO_TFM_MODE_CBC); - if(!tfm) { - printk(KERN_ERR "swsusp: no tfm, %s\n", modemsg); - error = -EINVAL; - goto out; - } - - if(MAXKEY < crypto_tfm_alg_min_keysize(tfm)) { - printk(KERN_ERR "swsusp: key buffer too small, %s\n", modemsg); - error = -ENOKEY; - goto fail; - } - - if (mode) - get_random_bytes(key_iv, MAXKEY+MAXIV); - - len = crypto_tfm_alg_max_keysize(tfm); - if (len > MAXKEY) - len = MAXKEY; - - if (crypto_cipher_setkey(tfm, key_iv, len)) { - printk(KERN_ERR "swsusp: key setup failure, %s\n", modemsg); - error = -EKEYREJECTED; - goto fail; - } - - len = crypto_tfm_alg_ivsize(tfm); - - if (MAXIV < len) { - printk(KERN_ERR "swsusp: iv buffer too small, %s\n", modemsg); - error = -EOVERFLOW; - goto fail; - } - - crypto_cipher_set_iv(tfm, key_iv+MAXKEY, len); - - *mem=(void *)tfm; - - goto out; - -fail: crypto_free_tfm(tfm); -out: return error; -} - -static __inline__ void crypto_exit(void *mem) -{ - crypto_free_tfm((struct crypto_tfm *)mem); -} - -static __inline__ int crypto_write(struct pbe *p, void *mem) -{ - int error = 0; - struct scatterlist src, dst; - - src.page = virt_to_page(p->address); - src.offset = 0; - src.length = PAGE_SIZE; - dst.page = virt_to_page((void *)&swsusp_header); - dst.offset = 0; - dst.length = PAGE_SIZE; - - error = crypto_cipher_encrypt((struct crypto_tfm *)mem, &dst, &src, - PAGE_SIZE); - - if (!error) - error = write_page((unsigned long)&swsusp_header, - &(p->swap_address)); - return error; -} - -static __inline__ int crypto_read(struct pbe *p, void *mem) -{ - int error = 0; - struct scatterlist src, dst; - - error = bio_read_page(swp_offset(p->swap_address), (void *)p->address); - if (!error) { - src.offset = 0; - src.length = PAGE_SIZE; - dst.offset = 0; - dst.length = PAGE_SIZE; - src.page = dst.page = virt_to_page((void *)p->address); - - error = crypto_cipher_decrypt((struct crypto_tfm *)mem, &dst, - &src, PAGE_SIZE); - } - return error; -} -#else -static __inline__ int crypto_init(int mode, void *mem) -{ - return 0; -} - -static __inline__ void crypto_exit(void *mem) -{ -} - -static __inline__ int crypto_write(struct pbe *p, void *mem) -{ - return write_page(p->address, &(p->swap_address)); -} +static unsigned short root_swap = 0xffff; -static __inline__ int crypto_read(struct pbe *p, void *mem) -{ - return bio_read_page(swp_offset(p->swap_address), (void *)p->address); -} -#endif - -static int mark_swapfiles(swp_entry_t prev) +static int mark_swapfiles(swp_entry_t start) { int error; @@ -259,8 +117,7 @@ static int mark_swapfiles(swp_entry_t prev) !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); memcpy(swsusp_header.sig,SWSUSP_SIG, 10); - memcpy(swsusp_header.key_iv, key_iv, MAXKEY+MAXIV); - swsusp_header.swsusp_info = prev; + swsusp_header.image = start; error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0), virt_to_page((unsigned long) @@ -283,7 +140,7 @@ static int mark_swapfiles(swp_entry_t prev) * devfs, since the resume code can only recognize the form /dev/hda4, * but the suspend code would see the long name.) 
 */
-static int is_resume_device(const struct swap_info_struct *swap_info)
+static inline int is_resume_device(const struct swap_info_struct *swap_info)
 {
 	struct file *file = swap_info->swap_file;
 	struct inode *inode = file->f_dentry->d_inode;
@@ -294,54 +151,22 @@ static int is_resume_device(const struct swap_info_struct *swap_info)

 static int swsusp_swap_check(void) /* This is called before saving image */
 {
-	int i, len;
-
-	len=strlen(resume_file);
-	root_swap = 0xFFFF;
-
-	spin_lock(&swap_lock);
-	for (i=0; i<MAX_SWAPFILES; i++) {
-		if (!(swap_info[i].flags & SWP_WRITEOK)) {
-			swapfile_used[i]=SWAPFILE_UNUSED;
-		} else {
-			if (!len) {
-				printk(KERN_WARNING "resume= option should be used to set suspend device" );
-				if (root_swap == 0xFFFF) {
-					swapfile_used[i] = SWAPFILE_SUSPEND;
-					root_swap = i;
-				} else
-					swapfile_used[i] = SWAPFILE_IGNORED;
-			} else {
-				/* we ignore all swap devices that are not the resume_file */
-				if (is_resume_device(&swap_info[i])) {
-					swapfile_used[i] = SWAPFILE_SUSPEND;
-					root_swap = i;
-				} else {
-					swapfile_used[i] = SWAPFILE_IGNORED;
-				}
-			}
-		}
-	}
-	spin_unlock(&swap_lock);
-	return (root_swap != 0xffff) ? 0 : -ENODEV;
-}
-
-/**
- * This is called after saving image so modification
- * will be lost after resume... and that's what we want.
- * we make the device unusable. A new call to
- * lock_swapdevices can unlock the devices.
- */
-static void lock_swapdevices(void)
-{
 	int i;

+	if (!swsusp_resume_device)
+		return -ENODEV;
 	spin_lock(&swap_lock);
-	for (i = 0; i< MAX_SWAPFILES; i++)
-		if (swapfile_used[i] == SWAPFILE_IGNORED) {
-			swap_info[i].flags ^= SWP_WRITEOK;
+	for (i = 0; i < MAX_SWAPFILES; i++) {
+		if (!(swap_info[i].flags & SWP_WRITEOK))
+			continue;
+		if (is_resume_device(swap_info + i)) {
+			spin_unlock(&swap_lock);
+			root_swap = i;
+			return 0;
 		}
+	}
 	spin_unlock(&swap_lock);
+	return -ENODEV;
 }

 /**
@@ -359,72 +184,217 @@ static void lock_swapdevices(void)
 static int write_page(unsigned long addr, swp_entry_t *loc)
 {
 	swp_entry_t entry;
-	int error = 0;
+	int error = -ENOSPC;

-	entry = get_swap_page();
-	if (swp_offset(entry) &&
-	    swapfile_used[swp_type(entry)] == SWAPFILE_SUSPEND) {
-		error = rw_swap_page_sync(WRITE, entry,
-				virt_to_page(addr));
-		if (error == -EIO)
-			error = 0;
-		if (!error)
+	entry = get_swap_page_of_type(root_swap);
+	if (swp_offset(entry)) {
+		error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
+		if (!error || error == -EIO)
 			*loc = entry;
-	} else
-		error = -ENOSPC;
+	}
 	return error;
 }

 /**
- * data_free - Free the swap entries used by the saved image.
+ * Swap map-handling functions
+ *
+ * The swap map is a data structure used for keeping track of each page
+ * written to the swap. It consists of many swap_map_page structures
+ * that each contain an array of MAP_PAGE_SIZE swap entries.
+ * These structures are linked together with the help of either the
+ * .next (in memory) or the .next_swap (in swap) member.
 *
- * Walk the list of used swap entries and free each one.
- * This is only used for cleanup when suspend fails.
+ * The swap map is created during suspend. At that time we need to keep
+ * it in memory, because we have to free all of the allocated swap
+ * entries if an error occurs. The memory needed is preallocated
+ * so that we know in advance if there's enough of it.
+ *
+ * The first swap_map_page structure is filled with the swap entries that
+ * correspond to the first MAP_PAGE_SIZE data pages written to swap and
+ * so on. After all of the data pages have been written, the order
+ * of the swap_map_page structures in the map is reversed so that they
+ * can be read from swap in the original order. This causes the data
+ * pages to be loaded in exactly the same order in which they have been
+ * saved.
+ *
+ * During resume we only need to use one swap_map_page structure
+ * at a time, which means that we only need to use two memory pages for
+ * reading the image - one for reading the swap_map_page structures
+ * and the second for reading the data pages from swap.
 */
-static void data_free(void)
+
+#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
+			/ sizeof(swp_entry_t))
+
+struct swap_map_page {
+	swp_entry_t entries[MAP_PAGE_SIZE];
+	swp_entry_t next_swap;
+	struct swap_map_page *next;
+};
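A small standalone model may help here. The sketch below (userspace C with PAGE_SIZE hard-coded to 4096 and swp_entry_t mocked up; illustrative only, not part of the patch) checks that the MAP_PAGE_SIZE arithmetic makes each swap_map_page exactly one page on common 32- and 64-bit ABIs, and demonstrates the single in-place list reversal that reverse_swap_map() below performs to put the map back into write order for reading:

/* Illustrative sketch only -- not part of the patch. A userspace model
 * of the swap map node and its reversal. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096
typedef struct { unsigned long val; } swp_entry_t;	/* stand-in type */

#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
			/ sizeof(swp_entry_t))

struct swap_map_page {
	swp_entry_t entries[MAP_PAGE_SIZE];
	swp_entry_t next_swap;		/* on-disk link */
	struct swap_map_page *next;	/* in-memory link */
};

/* reverse the singly linked chain, as reverse_swap_map() does */
static struct swap_map_page *reverse(struct swap_map_page *map)
{
	struct swap_map_page *prev = NULL, *next;

	while (map) {
		next = map->next;
		map->next = prev;
		prev = map;
		map = next;
	}
	return prev;
}

int main(void)
{
	/* each node fills one page exactly, so one get_zeroed_page() fits it */
	assert(sizeof(struct swap_map_page) == PAGE_SIZE);
	printf("%zu entries per map page\n", (size_t)MAP_PAGE_SIZE);

	/* chain b -> a, reversed to a -> b: read order == write order */
	struct swap_map_page a = { .next = NULL }, b = { .next = &a };
	struct swap_map_page *head = reverse(&b);
	assert(head == &a && a.next == &b);
	return 0;
}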
", nr_copy_pages ); - for_each_pbe (p, pagedir_nosave) { - if (!(i%mod)) - printk( "\b\b\b\b%3d%%", i / mod ); - if ((error = crypto_write(p, tfm))) { - crypto_exit(tfm); +static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start) +{ + swp_entry_t entry = (swp_entry_t){0}; + int error; + + while (swap_map) { + swap_map->next_swap = entry; + if ((error = write_page((unsigned long)swap_map, &entry))) return error; - } - i++; + swap_map = swap_map->next; } - printk("\b\b\b\bdone\n"); - crypto_exit(tfm); + *start = entry; + return 0; +} + +/** + * free_image_entries - free the swap entries allocated to store + * the image data pages (this is only called in case of an error) + */ + +static inline void free_image_entries(struct swap_map_page *swp) +{ + unsigned k; + + while (swp) { + for (k = 0; k < MAP_PAGE_SIZE; k++) + if (swp->entries[k].val) + swap_free(swp->entries[k]); + swp = swp->next; + } +} + +/** + * The swap_map_handle structure is used for handling the swap map in + * a file-alike way + */ + +struct swap_map_handle { + struct swap_map_page *cur; + unsigned int k; +}; + +static inline void init_swap_map_handle(struct swap_map_handle *handle, + struct swap_map_page *map) +{ + handle->cur = map; + handle->k = 0; +} + +static inline int swap_map_write_page(struct swap_map_handle *handle, + unsigned long addr) +{ + int error; + + error = write_page(addr, handle->cur->entries + handle->k); + if (error) + return error; + if (++handle->k >= MAP_PAGE_SIZE) { + handle->cur = handle->cur->next; + handle->k = 0; + } + return 0; +} + +/** + * save_image_data - save the data pages pointed to by the PBEs + * from the list @pblist using the swap map handle @handle + * (assume there are @nr_pages data pages to save) + */ + +static int save_image_data(struct pbe *pblist, + struct swap_map_handle *handle, + unsigned int nr_pages) +{ + unsigned int m; + struct pbe *p; + int error = 0; + + printk("Saving image data pages (%u pages) ... ", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + for_each_pbe (p, pblist) { + error = swap_map_write_page(handle, p->address); + if (error) + break; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + if (!error) + printk("\b\b\b\bdone\n"); return error; } @@ -440,70 +410,70 @@ static void dump_info(void) pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname); pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus); pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages); - pr_debug(" swsusp: Pagedir: %ld Pages\n",swsusp_info.pagedir_pages); + pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages); } -static void init_header(void) +static void init_header(unsigned int nr_pages) { memset(&swsusp_info, 0, sizeof(swsusp_info)); swsusp_info.version_code = LINUX_VERSION_CODE; swsusp_info.num_physpages = num_physpages; memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname)); - swsusp_info.suspend_pagedir = pagedir_nosave; swsusp_info.cpus = num_online_cpus(); - swsusp_info.image_pages = nr_copy_pages; -} - -static int close_swap(void) -{ - swp_entry_t entry; - int error; - - dump_info(); - error = write_page((unsigned long)&swsusp_info, &entry); - if (!error) { - printk( "S" ); - error = mark_swapfiles(entry); - printk( "|\n" ); - } - return error; + swsusp_info.image_pages = nr_pages; + swsusp_info.pages = nr_pages + + ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; } /** - * free_pagedir_entries - Free pages used by the page directory. 
 /**
- * free_pagedir_entries - Free pages used by the page directory.
- *
- * This is used during suspend for error recovery.
+ * pack_orig_addresses - the .orig_address fields of the PBEs from the
+ * list starting at @pbe are stored in the array @buf[] (1 page)
 */

-static void free_pagedir_entries(void)
+static inline struct pbe *pack_orig_addresses(unsigned long *buf,
+					      struct pbe *pbe)
 {
-	int i;
+	int j;

-	for (i = 0; i < swsusp_info.pagedir_pages; i++)
-		swap_free(swsusp_info.pagedir[i]);
+	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+		buf[j] = pbe->orig_address;
+		pbe = pbe->next;
+	}
+	if (!pbe)
+		for (; j < PAGE_SIZE / sizeof(long); j++)
+			buf[j] = 0;
+	return pbe;
 }

-
 /**
- * write_pagedir - Write the array of pages holding the page directory.
- * @last: Last swap entry we write (needed for header).
+ * save_image_metadata - save the .orig_address fields of the PBEs
+ * from the list @pblist using the swap map handle @handle
 */

-static int write_pagedir(void)
+static int save_image_metadata(struct pbe *pblist,
+			       struct swap_map_handle *handle)
 {
-	int error = 0;
+	unsigned long *buf;
 	unsigned int n = 0;
-	struct pbe *pbe;
+	struct pbe *p;
+	int error = 0;

-	printk( "Writing pagedir...");
-	for_each_pb_page (pbe, pagedir_nosave) {
-		if ((error = write_page((unsigned long)pbe, &swsusp_info.pagedir[n++])))
-			return error;
+	printk("Saving image metadata ... ");
+	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+	p = pblist;
+	while (p) {
+		p = pack_orig_addresses(buf, p);
+		error = swap_map_write_page(handle, (unsigned long)buf);
+		if (error)
+			break;
+		n++;
 	}
-
-	swsusp_info.pagedir_pages = n;
-	printk("done (%u pages)\n", n);
+	free_page((unsigned long)buf);
+	if (!error)
+		printk("done (%u pages saved)\n", n);
 	return error;
 }

@@ -511,75 +481,125 @@
  * enough_swap - Make sure we have enough swap to save the image.
  *
  * Returns TRUE or FALSE after checking the total amount of swap
- * space avaiable.
- *
- * FIXME: si_swapinfo(&i) returns all swap devices information.
- * We should only consider resume_device.
+ * space available from the resume partition.
 */

 static int enough_swap(unsigned int nr_pages)
 {
-	struct sysinfo i;
+	unsigned int free_swap = swap_info[root_swap].pages -
+			swap_info[root_swap].inuse_pages;

-	si_swapinfo(&i);
-	pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
-	return i.freeswap > (nr_pages + PAGES_FOR_IO +
+	pr_debug("swsusp: free swap pages: %u\n", free_swap);
+	return free_swap > (nr_pages + PAGES_FOR_IO +
 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }

 /**
- * write_suspend_image - Write entire image and metadata.
+ * swsusp_write - Write entire image and metadata.
 *
+ * It is important _NOT_ to umount filesystems at this point. We want
+ * them synced (in case something goes wrong) but we DO not want to mark
+ * filesystem clean: it is not. (And it does not matter, if we resume
+ * correctly, we'll mark system clean, anyway.)
*/ -static int write_suspend_image(void) + +int swsusp_write(struct pbe *pblist, unsigned int nr_pages) { + struct swap_map_page *swap_map; + struct swap_map_handle handle; + swp_entry_t start; int error; - if (!enough_swap(nr_copy_pages)) { + if ((error = swsusp_swap_check())) { + printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); + return error; + } + if (!enough_swap(nr_pages)) { printk(KERN_ERR "swsusp: Not enough free swap\n"); return -ENOSPC; } - init_header(); - if ((error = data_write())) - goto FreeData; + init_header(nr_pages); + swap_map = alloc_swap_map(swsusp_info.pages); + if (!swap_map) + return -ENOMEM; + init_swap_map_handle(&handle, swap_map); + + error = swap_map_write_page(&handle, (unsigned long)&swsusp_info); + if (!error) + error = save_image_metadata(pblist, &handle); + if (!error) + error = save_image_data(pblist, &handle, nr_pages); + if (error) + goto Free_image_entries; - if ((error = write_pagedir())) - goto FreePagedir; + swap_map = reverse_swap_map(swap_map); + error = save_swap_map(swap_map, &start); + if (error) + goto Free_map_entries; - if ((error = close_swap())) - goto FreePagedir; - Done: - memset(key_iv, 0, MAXKEY+MAXIV); + dump_info(); + printk( "S" ); + error = mark_swapfiles(start); + printk( "|\n" ); + if (error) + goto Free_map_entries; + +Free_swap_map: + free_swap_map(swap_map); return error; - FreePagedir: - free_pagedir_entries(); - FreeData: - data_free(); - goto Done; + +Free_map_entries: + free_swap_map_entries(swap_map); +Free_image_entries: + free_image_entries(swap_map); + goto Free_swap_map; } -/* It is important _NOT_ to umount filesystems at this point. We want - * them synced (in case something goes wrong) but we DO not want to mark - * filesystem clean: it is not. (And it does not matter, if we resume - * correctly, we'll mark system clean, anyway.) +/** + * swsusp_shrink_memory - Try to free as much memory as needed + * + * ... but do not OOM-kill anyone + * + * Notice: all userland should be stopped before it is called, or + * livelock is possible. */ -int swsusp_write(void) -{ - int error; - if ((error = swsusp_swap_check())) { - printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n"); - return error; - } - lock_swapdevices(); - error = write_suspend_image(); - /* This will unlock ignored swap devices since writing is finished */ - lock_swapdevices(); - return error; -} +#define SHRINK_BITE 10000 +int swsusp_shrink_memory(void) +{ + long size, tmp; + struct zone *zone; + unsigned long pages = 0; + unsigned int i = 0; + char *p = "-\\|/"; + + printk("Shrinking memory... 
"); + do { + size = 2 * count_highmem_pages(); + size += size / 50 + count_data_pages(); + size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE + + PAGES_FOR_IO; + tmp = size; + for_each_zone (zone) + if (!is_highmem(zone)) + tmp -= zone->free_pages; + if (tmp > 0) { + tmp = shrink_all_memory(SHRINK_BITE); + if (!tmp) + return -ENOMEM; + pages += tmp; + } else if (size > (image_size * 1024 * 1024) / PAGE_SIZE) { + tmp = shrink_all_memory(SHRINK_BITE); + pages += tmp; + } + printk("\b%c", p[i++%4]); + } while (tmp > 0); + printk("\bdone (%lu pages freed)\n", pages); + return 0; +} int swsusp_suspend(void) { @@ -677,7 +697,6 @@ static void copy_page_backup_list(struct pbe *dst, struct pbe *src) /* We assume both lists contain the same number of elements */ while (src) { dst->orig_address = src->orig_address; - dst->swap_address = src->swap_address; dst = dst->next; src = src->next; } @@ -757,198 +776,224 @@ static int bio_write_page(pgoff_t page_off, void *page) return submit(WRITE, page_off, page); } -/* - * Sanity check if this image makes sense with this kernel/swap context - * I really don't think that it's foolproof but more than nothing.. +/** + * The following functions allow us to read data using a swap map + * in a file-alike way */ -static const char *sanity_check(void) +static inline void release_swap_map_reader(struct swap_map_handle *handle) { - dump_info(); - if (swsusp_info.version_code != LINUX_VERSION_CODE) - return "kernel version"; - if (swsusp_info.num_physpages != num_physpages) - return "memory size"; - if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname)) - return "system type"; - if (strcmp(swsusp_info.uts.release,system_utsname.release)) - return "kernel release"; - if (strcmp(swsusp_info.uts.version,system_utsname.version)) - return "version"; - if (strcmp(swsusp_info.uts.machine,system_utsname.machine)) - return "machine"; -#if 0 - /* We can't use number of online CPUs when we use hotplug to remove them ;-))) */ - if (swsusp_info.cpus != num_possible_cpus()) - return "number of cpus"; -#endif - return NULL; + if (handle->cur) + free_page((unsigned long)handle->cur); + handle->cur = NULL; } - -static int check_header(void) +static inline int get_swap_map_reader(struct swap_map_handle *handle, + swp_entry_t start) { - const char *reason = NULL; int error; - if ((error = bio_read_page(swp_offset(swsusp_header.swsusp_info), &swsusp_info))) + if (!swp_offset(start)) + return -EINVAL; + handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); + if (!handle->cur) + return -ENOMEM; + error = bio_read_page(swp_offset(start), handle->cur); + if (error) { + release_swap_map_reader(handle); return error; - - /* Is this same machine? */ - if ((reason = sanity_check())) { - printk(KERN_ERR "swsusp: Resume mismatch: %s\n",reason); - return -EPERM; } - nr_copy_pages = swsusp_info.image_pages; - return error; + handle->k = 0; + return 0; } -static int check_sig(void) +static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf) { + unsigned long offset; int error; - memset(&swsusp_header, 0, sizeof(swsusp_header)); - if ((error = bio_read_page(0, &swsusp_header))) - return error; - if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { - memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); - memcpy(key_iv, swsusp_header.key_iv, MAXKEY+MAXIV); - memset(swsusp_header.key_iv, 0, MAXKEY+MAXIV); - - /* - * Reset swap signature now. 
- */ - error = bio_write_page(0, &swsusp_header); - } else { + if (!handle->cur) + return -EINVAL; + offset = swp_offset(handle->cur->entries[handle->k]); + if (!offset) return -EINVAL; + error = bio_read_page(offset, buf); + if (error) + return error; + if (++handle->k >= MAP_PAGE_SIZE) { + handle->k = 0; + offset = swp_offset(handle->cur->next_swap); + if (!offset) + release_swap_map_reader(handle); + else + error = bio_read_page(offset, handle->cur); } - if (!error) - pr_debug("swsusp: Signature found, resuming\n"); return error; } -/** - * data_read - Read image pages from swap. - * - * You do not need to check for overlaps, check_pagedir() - * already did that. - */ - -static int data_read(struct pbe *pblist) +static int check_header(void) { - struct pbe *p; - int error = 0; - int i = 0; - int mod = swsusp_info.image_pages / 100; - void *tfm; - - if ((error = crypto_init(0, &tfm))) - return error; - - if (!mod) - mod = 1; - - printk("swsusp: Reading image data (%lu pages): ", - swsusp_info.image_pages); - - for_each_pbe (p, pblist) { - if (!(i % mod)) - printk("\b\b\b\b%3d%%", i / mod); + char *reason = NULL; - if ((error = crypto_read(p, tfm))) { - crypto_exit(tfm); - return error; - } - - i++; + dump_info(); + if (swsusp_info.version_code != LINUX_VERSION_CODE) + reason = "kernel version"; + if (swsusp_info.num_physpages != num_physpages) + reason = "memory size"; + if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname)) + reason = "system type"; + if (strcmp(swsusp_info.uts.release,system_utsname.release)) + reason = "kernel release"; + if (strcmp(swsusp_info.uts.version,system_utsname.version)) + reason = "version"; + if (strcmp(swsusp_info.uts.machine,system_utsname.machine)) + reason = "machine"; + if (reason) { + printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); + return -EPERM; } - printk("\b\b\b\bdone\n"); - crypto_exit(tfm); - return error; + return 0; } /** - * read_pagedir - Read page backup list pages from swap + * load_image_data - load the image data using the swap map handle + * @handle and store them using the page backup list @pblist + * (assume there are @nr_pages pages to load) */ -static int read_pagedir(struct pbe *pblist) +static int load_image_data(struct pbe *pblist, + struct swap_map_handle *handle, + unsigned int nr_pages) { - struct pbe *pbpage, *p; - unsigned int i = 0; int error; + unsigned int m; + struct pbe *p; if (!pblist) - return -EFAULT; - - printk("swsusp: Reading pagedir (%lu pages)\n", - swsusp_info.pagedir_pages); - - for_each_pb_page (pbpage, pblist) { - unsigned long offset = swp_offset(swsusp_info.pagedir[i++]); - - error = -EFAULT; - if (offset) { - p = (pbpage + PB_PAGE_SKIP)->next; - error = bio_read_page(offset, (void *)pbpage); - (pbpage + PB_PAGE_SKIP)->next = p; - } + return -EINVAL; + printk("Loading image data pages (%u pages) ... 
", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + p = pblist; + while (p) { + error = swap_map_read_page(handle, (void *)p->address); if (error) break; + p = p->next; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; } - if (!error) - BUG_ON(i != swsusp_info.pagedir_pages); - + printk("\b\b\b\bdone\n"); return error; } +/** + * unpack_orig_addresses - copy the elements of @buf[] (1 page) to + * the PBEs in the list starting at @pbe + */ -static int check_suspend_image(void) +static inline struct pbe *unpack_orig_addresses(unsigned long *buf, + struct pbe *pbe) { - int error = 0; + int j; - if ((error = check_sig())) - return error; - - if ((error = check_header())) - return error; - - return 0; + for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { + pbe->orig_address = buf[j]; + pbe = pbe->next; + } + return pbe; } -static int read_suspend_image(void) +/** + * load_image_metadata - load the image metadata using the swap map + * handle @handle and put them into the PBEs in the list @pblist + */ + +static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle) { - int error = 0; struct pbe *p; + unsigned long *buf; + unsigned int n = 0; + int error = 0; - if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0))) + printk("Loading image metadata ... "); + buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); + if (!buf) return -ENOMEM; - - if ((error = read_pagedir(p))) - return error; - create_pbe_list(p, nr_copy_pages); - mark_unsafe_pages(p); - pagedir_nosave = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1); - if (pagedir_nosave) { - create_pbe_list(pagedir_nosave, nr_copy_pages); - copy_page_backup_list(pagedir_nosave, p); + p = pblist; + while (p) { + error = swap_map_read_page(handle, buf); + if (error) + break; + p = unpack_orig_addresses(buf, p); + n++; } - free_pagedir(p); - if (!pagedir_nosave) - return -ENOMEM; + free_page((unsigned long)buf); + if (!error) + printk("done (%u pages loaded)\n", n); + return error; +} - /* Allocate memory for the image and read the data from swap */ +int swsusp_read(struct pbe **pblist_ptr) +{ + int error; + struct pbe *p, *pblist; + struct swap_map_handle handle; + unsigned int nr_pages; - error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1); + if (IS_ERR(resume_bdev)) { + pr_debug("swsusp: block device not initialised\n"); + return PTR_ERR(resume_bdev); + } + error = get_swap_map_reader(&handle, swsusp_header.image); if (!error) - error = data_read(pagedir_nosave); + error = swap_map_read_page(&handle, &swsusp_info); + if (!error) + error = check_header(); + if (error) + return error; + nr_pages = swsusp_info.image_pages; + p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0); + if (!p) + return -ENOMEM; + error = load_image_metadata(p, &handle); + if (!error) { + mark_unsafe_pages(p); + pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); + if (pblist) + copy_page_backup_list(pblist, p); + free_pagedir(p); + if (!pblist) + error = -ENOMEM; + + /* Allocate memory for the image and read the data from swap */ + if (!error) + error = alloc_data_pages(pblist, GFP_ATOMIC, 1); + if (!error) { + release_eaten_pages(); + error = load_image_data(pblist, &handle, nr_pages); + } + if (!error) + *pblist_ptr = pblist; + } + release_swap_map_reader(&handle); + blkdev_put(resume_bdev); + + if (!error) + pr_debug("swsusp: Reading resume file was successful\n"); + else + pr_debug("swsusp: Error %d resuming\n", error); return error; } /** - * swsusp_check - Check for saved image in swap + * swsusp_check - 
Check for swsusp signature in the resume device */ int swsusp_check(void) @@ -958,40 +1003,27 @@ int swsusp_check(void) resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); if (!IS_ERR(resume_bdev)) { set_blocksize(resume_bdev, PAGE_SIZE); - error = check_suspend_image(); + memset(&swsusp_header, 0, sizeof(swsusp_header)); + if ((error = bio_read_page(0, &swsusp_header))) + return error; + if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { + memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); + /* Reset swap signature now */ + error = bio_write_page(0, &swsusp_header); + } else { + return -EINVAL; + } if (error) - blkdev_put(resume_bdev); - } else + blkdev_put(resume_bdev); + else + pr_debug("swsusp: Signature found, resuming\n"); + } else { error = PTR_ERR(resume_bdev); - - if (!error) - pr_debug("swsusp: resume file found\n"); - else - pr_debug("swsusp: Error %d check for resume file\n", error); - return error; -} - -/** - * swsusp_read - Read saved image from swap. - */ - -int swsusp_read(void) -{ - int error; - - if (IS_ERR(resume_bdev)) { - pr_debug("swsusp: block device not initialised\n"); - return PTR_ERR(resume_bdev); } - error = read_suspend_image(); - blkdev_put(resume_bdev); - memset(key_iv, 0, MAXKEY+MAXIV); + if (error) + pr_debug("swsusp: Error %d check for resume file\n", error); - if (!error) - pr_debug("swsusp: Reading resume file was successful\n"); - else - pr_debug("swsusp: Error %d resuming\n", error); return error; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 345f4a1..a85047b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -108,7 +108,7 @@ extern int pwrsw_enabled; extern int unaligned_enabled; #endif -#ifdef CONFIG_ARCH_S390 +#ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU extern int sysctl_ieee_emulation_warnings; #endif @@ -542,7 +542,7 @@ static ctl_table kern_table[] = { .extra1 = &minolduid, .extra2 = &maxolduid, }, -#ifdef CONFIG_ARCH_S390 +#ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU { .ctl_name = KERN_IEEE_EMULATION_WARNINGS, @@ -644,7 +644,7 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, -#if defined(CONFIG_ARCH_S390) +#if defined(CONFIG_S390) { .ctl_name = KERN_SPIN_RETRY, .procname = "spin_retry", diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 156822e..80598cf 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -32,7 +32,7 @@ config MAGIC_SYSRQ config LOG_BUF_SHIFT int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL range 12 21 - default 17 if ARCH_S390 + default 17 if S390 default 16 if X86_NUMAQ || IA64 default 15 if SMP default 14 @@ -172,7 +172,8 @@ config DEBUG_VM bool "Debug VM" depends on DEBUG_KERNEL help - Enable this to debug the virtual-memory system. + Enable this to turn on extended checks in the virtual-memory system + that may impact performance. If unsure, say N. 
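
The reworked swsusp_check() above probes page 0 of the resume device for the 10-byte signature and, on a match, immediately writes the original signature back so the image can be resumed only once. A minimal in-memory model of that handshake (the struct layout and names are assumptions, and the write-back to the device is elided):

    #include <string.h>

    #define SIG     "S1SUSPEND"
    #define SIG_LEN 10

    struct header_model {
        char orig_sig[SIG_LEN];   /* whatever the swap area carried */
        char sig[SIG_LEN];        /* suspend signature, if present */
    };

    /* Returns 0 and consumes the signature when an image is found. */
    static int check_and_consume(struct header_model *hdr)
    {
        if (memcmp(hdr->sig, SIG, SIG_LEN))
            return -1;                             /* no image: -EINVAL */
        memcpy(hdr->sig, hdr->orig_sig, SIG_LEN);  /* one-shot resume */
        return 0;
    }
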
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 1ff8dce..3b48205 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size) /* * Get IO TLB memory from the low pages */ - io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * - (1 << IO_TLB_SHIFT), 0x100000000); + io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); if (!io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); @@ -11,7 +11,7 @@ choice config FLATMEM_MANUAL bool "Flat Memory" - depends on !ARCH_DISCONTIGMEM_ENABLE || ARCH_FLATMEM_ENABLE + depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE help This option allows you to change some of the ways that Linux manages its memory internally. Most users will diff --git a/mm/bootmem.c b/mm/bootmem.c index 16b9465..35c3229 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -296,20 +296,12 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat) unsigned long v = ~map[i / BITS_PER_LONG]; if (gofast && v == ~0UL) { - int j, order; + int order; page = pfn_to_page(pfn); count += BITS_PER_LONG; - __ClearPageReserved(page); order = ffs(BITS_PER_LONG) - 1; - set_page_refs(page, order); - for (j = 1; j < BITS_PER_LONG; j++) { - if (j + 16 < BITS_PER_LONG) - prefetchw(page + j + 16); - __ClearPageReserved(page + j); - set_page_count(page + j, 0); - } - __free_pages(page, order); + __free_pages_bootmem(page, order); i += BITS_PER_LONG; page += BITS_PER_LONG; } else if (v) { @@ -319,9 +311,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat) for (m = 1; m && i < idx; m<<=1, page++, i++) { if (v & m) { count++; - __ClearPageReserved(page); - set_page_refs(page, 0); - __free_page(page); + __free_pages_bootmem(page, 0); } } } else { @@ -339,9 +329,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat) count = 0; for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) { count++; - __ClearPageReserved(page); - set_page_count(page, 1); - __free_page(page); + __free_pages_bootmem(page, 0); } total += count; bdata->node_bootmem_map = NULL; @@ -393,15 +381,14 @@ unsigned long __init free_all_bootmem (void) return(free_all_bootmem_core(NODE_DATA(0))); } -void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, - unsigned long limit) +void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) { pg_data_t *pgdat = pgdat_list; void *ptr; for_each_pgdat(pgdat) if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, - align, goal, limit))) + align, goal, 0))) return(ptr); /* @@ -413,15 +400,40 @@ void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, un } -void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal, unsigned long limit) +void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, + unsigned long goal) { void *ptr; - ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit); + ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); if (ptr) return (ptr); - return __alloc_bootmem_limit(size, align, goal, limit); + return __alloc_bootmem(size, align, goal); +} + +#define LOW32LIMIT 0xffffffff + +void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) +{ + 
pg_data_t *pgdat = pgdat_list; + void *ptr; + + for_each_pgdat(pgdat) + if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, + align, goal, LOW32LIMIT))) + return(ptr); + + /* + * Whoops, we cannot satisfy the allocation request. + */ + printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size); + panic("Out of low memory"); + return NULL; } +void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, + unsigned long align, unsigned long goal) +{ + return __alloc_bootmem_core(pgdat->bdata, size, align, goal, LOW32LIMIT); +} diff --git a/mm/filemap.c b/mm/filemap.c index 6e1d08a..4ef24a3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -555,11 +555,12 @@ repeat: page_cache_get(page); if (TestSetPageLocked(page)) { read_unlock_irq(&mapping->tree_lock); - lock_page(page); + __lock_page(page); read_lock_irq(&mapping->tree_lock); /* Has the page been truncated while we slept? */ - if (page->mapping != mapping || page->index != offset) { + if (unlikely(page->mapping != mapping || + page->index != offset)) { unlock_page(page); page_cache_release(page); goto repeat; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3e52df7..f4c43d7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -11,6 +11,8 @@ #include <linux/highmem.h> #include <linux/nodemask.h> #include <linux/pagemap.h> +#include <linux/mempolicy.h> + #include <asm/page.h> #include <asm/pgtable.h> @@ -36,18 +38,21 @@ static void enqueue_huge_page(struct page *page) free_huge_pages_node[nid]++; } -static struct page *dequeue_huge_page(void) +static struct page *dequeue_huge_page(struct vm_area_struct *vma, + unsigned long address) { int nid = numa_node_id(); struct page *page = NULL; + struct zonelist *zonelist = huge_zonelist(vma, address); + struct zone **z; - if (list_empty(&hugepage_freelists[nid])) { - for (nid = 0; nid < MAX_NUMNODES; ++nid) - if (!list_empty(&hugepage_freelists[nid])) - break; + for (z = zonelist->zones; *z; z++) { + nid = (*z)->zone_pgdat->node_id; + if (!list_empty(&hugepage_freelists[nid])) + break; } - if (nid >= 0 && nid < MAX_NUMNODES && - !list_empty(&hugepage_freelists[nid])) { + + if (*z) { page = list_entry(hugepage_freelists[nid].next, struct page, lru); list_del(&page->lru); @@ -85,13 +90,13 @@ void free_huge_page(struct page *page) spin_unlock(&hugetlb_lock); } -struct page *alloc_huge_page(void) +struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) { struct page *page; int i; spin_lock(&hugetlb_lock); - page = dequeue_huge_page(); + page = dequeue_huge_page(vma, addr); if (!page) { spin_unlock(&hugetlb_lock); return NULL; @@ -194,7 +199,7 @@ static unsigned long set_max_huge_pages(unsigned long count) spin_lock(&hugetlb_lock); try_to_free_low(count); while (count < nr_huge_pages) { - struct page *page = dequeue_huge_page(); + struct page *page = dequeue_huge_page(NULL, 0); if (!page) break; update_and_free_page(page); @@ -261,11 +266,12 @@ struct vm_operations_struct hugetlb_vm_ops = { .nopage = hugetlb_nopage, }; -static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page) +static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, + int writable) { pte_t entry; - if (vma->vm_flags & VM_WRITE) { + if (writable) { entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); } else { @@ -277,12 +283,27 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page) return entry; } +static void set_huge_ptep_writable(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + pte_t entry; + + entry = 
pte_mkwrite(pte_mkdirty(*ptep)); + ptep_set_access_flags(vma, address, ptep, entry, 1); + update_mmu_cache(vma, address, entry); + lazy_mmu_prot_update(entry); +} + + int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { pte_t *src_pte, *dst_pte, entry; struct page *ptepage; unsigned long addr; + int cow; + + cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { src_pte = huge_pte_offset(src, addr); @@ -294,6 +315,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, spin_lock(&dst->page_table_lock); spin_lock(&src->page_table_lock); if (!pte_none(*src_pte)) { + if (cow) + ptep_set_wrprotect(src, addr, src_pte); entry = *src_pte; ptepage = pte_page(entry); get_page(ptepage); @@ -345,57 +368,63 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, flush_tlb_range(vma, start, end); } -static struct page *find_lock_huge_page(struct address_space *mapping, - unsigned long idx) +static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, pte_t pte) { - struct page *page; - int err; - struct inode *inode = mapping->host; - unsigned long size; + struct page *old_page, *new_page; + int i, avoidcopy; -retry: - page = find_lock_page(mapping, idx); - if (page) - goto out; + old_page = pte_page(pte); - /* Check to make sure the mapping hasn't been truncated */ - size = i_size_read(inode) >> HPAGE_SHIFT; - if (idx >= size) - goto out; + /* If no-one else is actually using this page, avoid the copy + * and just make the page writable */ + avoidcopy = (page_count(old_page) == 1); + if (avoidcopy) { + set_huge_ptep_writable(vma, address, ptep); + return VM_FAULT_MINOR; + } - if (hugetlb_get_quota(mapping)) - goto out; - page = alloc_huge_page(); - if (!page) { - hugetlb_put_quota(mapping); - goto out; + page_cache_get(old_page); + new_page = alloc_huge_page(vma, address); + + if (!new_page) { + page_cache_release(old_page); + + /* Logically this is OOM, not a SIGBUS, but an OOM + * could cause the kernel to go killing other + * processes which won't help the hugepage situation + * at all (?) 
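
hugetlb_cow() above first checks whether the faulting task is the only user of the huge page (page_count() == 1) and in that case skips the copy entirely, merely making the PTE writable; otherwise the page is duplicated in base-page chunks, since copy_user_highpage() works on individual pages. A reduced stand-alone sketch, with sizes and names as illustrative assumptions:

    #include <string.h>

    #define BASE_PAGE 4096UL
    #define HPAGE_SZ  (512 * BASE_PAGE)     /* e.g. a 2 MiB huge page */

    /* Returns the page the PTE should map after a write fault. */
    static void *huge_cow(void *old, int refcount, void *fresh)
    {
        if (refcount == 1)          /* sole user: avoid the copy */
            return old;
        for (unsigned long i = 0; i < HPAGE_SZ / BASE_PAGE; i++)
            memcpy((char *)fresh + i * BASE_PAGE,
                   (char *)old + i * BASE_PAGE, BASE_PAGE);
        return fresh;               /* caller breaks COW to this copy */
    }
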
*/ + return VM_FAULT_SIGBUS; } - err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); - if (err) { - put_page(page); - hugetlb_put_quota(mapping); - if (err == -EEXIST) - goto retry; - page = NULL; + spin_unlock(&mm->page_table_lock); + for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) + copy_user_highpage(new_page + i, old_page + i, + address + i*PAGE_SIZE); + spin_lock(&mm->page_table_lock); + + ptep = huge_pte_offset(mm, address & HPAGE_MASK); + if (likely(pte_same(*ptep, pte))) { + /* Break COW */ + set_huge_pte_at(mm, address, ptep, + make_huge_pte(vma, new_page, 1)); + /* Make the old page be freed below */ + new_page = old_page; } -out: - return page; + page_cache_release(new_page); + page_cache_release(old_page); + return VM_FAULT_MINOR; } -int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access) +int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, int write_access) { int ret = VM_FAULT_SIGBUS; unsigned long idx; unsigned long size; - pte_t *pte; struct page *page; struct address_space *mapping; - - pte = huge_pte_alloc(mm, address); - if (!pte) - goto out; + pte_t new_pte; mapping = vma->vm_file->f_mapping; idx = ((address - vma->vm_start) >> HPAGE_SHIFT) @@ -405,9 +434,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * Use page lock to guard against racing truncation * before we get page_table_lock. */ - page = find_lock_huge_page(mapping, idx); - if (!page) - goto out; +retry: + page = find_lock_page(mapping, idx); + if (!page) { + if (hugetlb_get_quota(mapping)) + goto out; + page = alloc_huge_page(vma, address); + if (!page) { + hugetlb_put_quota(mapping); + goto out; + } + + if (vma->vm_flags & VM_SHARED) { + int err; + + err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + if (err) { + put_page(page); + hugetlb_put_quota(mapping); + if (err == -EEXIST) + goto retry; + goto out; + } + } else + lock_page(page); + } spin_lock(&mm->page_table_lock); size = i_size_read(mapping->host) >> HPAGE_SHIFT; @@ -415,11 +466,19 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, goto backout; ret = VM_FAULT_MINOR; - if (!pte_none(*pte)) + if (!pte_none(*ptep)) goto backout; add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE); - set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page)); + new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) + && (vma->vm_flags & VM_SHARED))); + set_huge_pte_at(mm, address, ptep, new_pte); + + if (write_access && !(vma->vm_flags & VM_SHARED)) { + /* Optimization, do the COW without a second fault */ + ret = hugetlb_cow(mm, vma, address, ptep, new_pte); + } + spin_unlock(&mm->page_table_lock); unlock_page(page); out: @@ -433,6 +492,33 @@ backout: goto out; } +int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + pte_t *ptep; + pte_t entry; + int ret; + + ptep = huge_pte_alloc(mm, address); + if (!ptep) + return VM_FAULT_OOM; + + entry = *ptep; + if (pte_none(entry)) + return hugetlb_no_page(mm, vma, address, ptep, write_access); + + ret = VM_FAULT_MINOR; + + spin_lock(&mm->page_table_lock); + /* Check for a racing update before calling hugetlb_cow */ + if (likely(pte_same(entry, *ptep))) + if (write_access && !pte_write(entry)) + ret = hugetlb_cow(mm, vma, address, ptep, entry); + spin_unlock(&mm->page_table_lock); + + return ret; +} + int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, 
struct vm_area_struct **vmas, unsigned long *position, int *length, int i) diff --git a/mm/internal.h b/mm/internal.h index 6bf134e..17256bb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -9,5 +9,22 @@ * 2 of the License, or (at your option) any later version. */ -/* page_alloc.c */ -extern void set_page_refs(struct page *page, int order); +static inline void set_page_refs(struct page *page, int order) +{ +#ifdef CONFIG_MMU + set_page_count(page, 1); +#else + int i; + + /* + * We need to reference all the pages for this order, otherwise if + * anyone accesses one of the pages with (get/put) it will be freed. + * - eg: access_process_vm() + */ + for (i = 0; i < (1 << order); i++) + set_page_count(page + i, 1); +#endif /* CONFIG_MMU */ +} + +extern void fastcall __init __free_pages_bootmem(struct page *page, + unsigned int order); diff --git a/mm/madvise.c b/mm/madvise.c index 2b7cf04..ae0ae3e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -140,6 +140,36 @@ static long madvise_dontneed(struct vm_area_struct * vma, return 0; } +/* + * Application wants to free up the pages and associated backing store. + * This is effectively punching a hole into the middle of a file. + * + * NOTE: Currently, only shmfs/tmpfs is supported for this operation. + * Other filesystems return -ENOSYS. + */ +static long madvise_remove(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct address_space *mapping; + loff_t offset, endoff; + + if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB)) + return -EINVAL; + + if (!vma->vm_file || !vma->vm_file->f_mapping + || !vma->vm_file->f_mapping->host) { + return -EINVAL; + } + + mapping = vma->vm_file->f_mapping; + + offset = (loff_t)(start - vma->vm_start) + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + endoff = (loff_t)(end - vma->vm_start - 1) + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + return vmtruncate_range(mapping->host, offset, endoff); +} + static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) @@ -152,6 +182,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, case MADV_RANDOM: error = madvise_behavior(vma, prev, start, end, behavior); break; + case MADV_REMOVE: + error = madvise_remove(vma, start, end); + break; case MADV_WILLNEED: error = madvise_willneed(vma, prev, start, end); @@ -190,6 +223,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, * some pages ahead. * MADV_DONTNEED - the application is finished with the given range, * so the kernel can free resources associated with it. + * MADV_REMOVE - the application wants to free up the given range of + * pages and associated backing store. * * return values: * zero - success diff --git a/mm/memory.c b/mm/memory.c index d8dde07..7197f9b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1498,7 +1498,7 @@ gotten: update_mmu_cache(vma, address, entry); lazy_mmu_prot_update(entry); lru_cache_add_active(new_page); - page_add_anon_rmap(new_page, vma, address); + page_add_new_anon_rmap(new_page, vma, address); /* Free the old page.. */ new_page = old_page; @@ -1770,9 +1770,32 @@ out_big: out_busy: return -ETXTBSY; } - EXPORT_SYMBOL(vmtruncate); +int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) +{ + struct address_space *mapping = inode->i_mapping; + + /* + * If the underlying filesystem is not going to provide + * a way to truncate a range of blocks (punch a hole) - + * we should return failure right now. 
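
The new MADV_REMOVE advice, wired to vmtruncate_range() here, lets an application punch a hole through a mapping of a tmpfs file, freeing both the pages and their backing store. A user-space sketch of its use; the path and sizes are assumptions, error paths are abbreviated, and headers that predate the constant would need MADV_REMOVE defined by hand:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int punch_middle_hole(void)
    {
        size_t len = 16 * 4096;
        int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, len) < 0)
            return -1;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return -1;
        /* Drop pages and backing store for the middle half; on a
         * filesystem without ->truncate_range this fails with ENOSYS. */
        if (madvise(p + 4 * 4096, 8 * 4096, MADV_REMOVE) < 0)
            return -1;
        munmap(p, len);
        close(fd);
        return 0;
    }
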
+ */ + if (!inode->i_op || !inode->i_op->truncate_range) + return -ENOSYS; + + down(&inode->i_sem); + down_write(&inode->i_alloc_sem); + unmap_mapping_range(mapping, offset, (end - offset), 1); + truncate_inode_pages_range(mapping, offset, end); + inode->i_op->truncate_range(inode, offset, end); + up_write(&inode->i_alloc_sem); + up(&inode->i_sem); + + return 0; +} +EXPORT_SYMBOL(vmtruncate_range); + /* * Primitive swap readahead code. We simply read an aligned block of * (1 << page_cluster) entries in the swap area. This method is chosen @@ -1954,8 +1977,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, goto release; inc_mm_counter(mm, anon_rss); lru_cache_add_active(page); - SetPageReferenced(page); - page_add_anon_rmap(page, vma, address); + page_add_new_anon_rmap(page, vma, address); } else { /* Map the ZERO_PAGE - vm_page_prot is readonly */ page = ZERO_PAGE(address); @@ -2086,7 +2108,7 @@ retry: if (anon) { inc_mm_counter(mm, anon_rss); lru_cache_add_active(new_page); - page_add_anon_rmap(new_page, vma, address); + page_add_new_anon_rmap(new_page, vma, address); } else { inc_mm_counter(mm, file_rss); page_add_file_rmap(new_page); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f6d4af8..a918f77 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -42,7 +42,6 @@ extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, int nr_pages); static int __add_section(struct zone *zone, unsigned long phys_start_pfn) { - struct pglist_data *pgdat = zone->zone_pgdat; int nr_pages = PAGES_PER_SECTION; int ret; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 72f402c..0f1d2b8 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -93,7 +93,7 @@ static kmem_cache_t *sn_cache; /* Highest zone. An specific allocation for a zone below that is not policied. */ -static int policy_zone; +int policy_zone = ZONE_DMA; struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ @@ -131,17 +131,8 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes) if (!zl) return NULL; num = 0; - for_each_node_mask(nd, *nodes) { - int k; - for (k = MAX_NR_ZONES-1; k >= 0; k--) { - struct zone *z = &NODE_DATA(nd)->node_zones[k]; - if (!z->present_pages) - continue; - zl->zones[num++] = z; - if (k > policy_zone) - policy_zone = k; - } - } + for_each_node_mask(nd, *nodes) + zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone]; zl->zones[num] = NULL; return zl; } @@ -785,6 +776,34 @@ static unsigned offset_il_node(struct mempolicy *pol, return nid; } +/* Determine a node number for interleave */ +static inline unsigned interleave_nid(struct mempolicy *pol, + struct vm_area_struct *vma, unsigned long addr, int shift) +{ + if (vma) { + unsigned long off; + + off = vma->vm_pgoff; + off += (addr - vma->vm_start) >> shift; + return offset_il_node(pol, vma, off); + } else + return interleave_nodes(pol); +} + +/* Return a zonelist suitable for a huge page allocation. */ +struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr) +{ + struct mempolicy *pol = get_vma_policy(current, vma, addr); + + if (pol->policy == MPOL_INTERLEAVE) { + unsigned nid; + + nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT); + return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER); + } + return zonelist_policy(GFP_HIGHUSER, pol); +} + /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. 
*/ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, @@ -833,15 +852,8 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) if (unlikely(pol->policy == MPOL_INTERLEAVE)) { unsigned nid; - if (vma) { - unsigned long off; - off = vma->vm_pgoff; - off += (addr - vma->vm_start) >> PAGE_SHIFT; - nid = offset_il_node(pol, vma, off); - } else { - /* fall back to process interleaving */ - nid = interleave_nodes(pol); - } + + nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); return alloc_page_interleave(gfp, 0, nid); } return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol)); @@ -940,54 +952,6 @@ void __mpol_free(struct mempolicy *p) } /* - * Hugetlb policy. Same as above, just works with node numbers instead of - * zonelists. - */ - -/* Find first node suitable for an allocation */ -int mpol_first_node(struct vm_area_struct *vma, unsigned long addr) -{ - struct mempolicy *pol = get_vma_policy(current, vma, addr); - - switch (pol->policy) { - case MPOL_DEFAULT: - return numa_node_id(); - case MPOL_BIND: - return pol->v.zonelist->zones[0]->zone_pgdat->node_id; - case MPOL_INTERLEAVE: - return interleave_nodes(pol); - case MPOL_PREFERRED: - return pol->v.preferred_node >= 0 ? - pol->v.preferred_node : numa_node_id(); - } - BUG(); - return 0; -} - -/* Find secondary valid nodes for an allocation */ -int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr) -{ - struct mempolicy *pol = get_vma_policy(current, vma, addr); - - switch (pol->policy) { - case MPOL_PREFERRED: - case MPOL_DEFAULT: - case MPOL_INTERLEAVE: - return 1; - case MPOL_BIND: { - struct zone **z; - for (z = pol->v.zonelist->zones; *z; z++) - if ((*z)->zone_pgdat->node_id == nid) - return 1; - return 0; - } - default: - BUG(); - return 0; - } -} - -/* * Shared memory backing store policy support. * * Remember policies even when nobody has shared memory mapped. 
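
interleave_nid() above folds the two former call sites into one helper: the target node is derived from the VMA's file offset plus the distance of the faulting address from vm_start, measured in units of the given shift (PAGE_SHIFT for base pages, HPAGE_SHIFT for huge pages), so consecutive pages of either size round-robin across the allowed nodes. A reduced model, with offset_il_node() approximated by a modulo and all names assumptions:

    static unsigned pick_interleave_node(unsigned long vm_pgoff,
                                         unsigned long vm_start,
                                         unsigned long addr, int shift,
                                         const int *nodes,
                                         unsigned nr_nodes)
    {
        unsigned long off = vm_pgoff + ((addr - vm_start) >> shift);

        return nodes[off % nr_nodes];
    }

Using HPAGE_SHIFT rather than PAGE_SHIFT is what makes huge_zonelist() spread successive huge pages, not successive base pages, over the interleave set.
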
@@ -1177,3 +1177,10 @@ int in_gate_area_no_task(unsigned long addr) { return 0; } + +struct page *filemap_nopage(struct vm_area_struct *area, + unsigned long address, int *type) +{ + BUG(); + return NULL; +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fe14a8c..fd47494 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -36,6 +36,7 @@ #include <linux/memory_hotplug.h> #include <linux/nodemask.h> #include <linux/vmalloc.h> +#include <linux/mempolicy.h> #include <asm/tlbflush.h> #include "internal.h" @@ -53,6 +54,8 @@ unsigned long totalram_pages __read_mostly; unsigned long totalhigh_pages __read_mostly; long nr_swap_pages; +static void fastcall free_hot_cold_page(struct page *page, int cold); + /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) @@ -81,6 +84,7 @@ int min_free_kbytes = 1024; unsigned long __initdata nr_kernel_pages; unsigned long __initdata nr_all_pages; +#ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { int ret = 0; @@ -122,16 +126,23 @@ static int bad_range(struct zone *zone, struct page *page) return 0; } -static void bad_page(const char *function, struct page *page) +#else +static inline int bad_range(struct zone *zone, struct page *page) +{ + return 0; +} +#endif + +static void bad_page(struct page *page) { - printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n", - function, current->comm, page); - printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", - (int)(2*sizeof(unsigned long)), (unsigned long)page->flags, - page->mapping, page_mapcount(page), page_count(page)); - printk(KERN_EMERG "Backtrace:\n"); + printk(KERN_EMERG "Bad page state in process '%s'\n" + "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" + "Trying to fix it up, but a reboot is needed\n" + "Backtrace:\n", + current->comm, page, (int)(2*sizeof(unsigned long)), + (unsigned long)page->flags, page->mapping, + page_mapcount(page), page_count(page)); dump_stack(); - printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"); page->flags &= ~(1 << PG_lru | 1 << PG_private | 1 << PG_locked | @@ -184,19 +195,15 @@ static void destroy_compound_page(struct page *page, unsigned long order) int i; int nr_pages = 1 << order; - if (!PageCompound(page)) - return; - - if (page[1].index != order) - bad_page(__FUNCTION__, page); + if (unlikely(page[1].index != order)) + bad_page(page); for (i = 0; i < nr_pages; i++) { struct page *p = page + i; - if (!PageCompound(p)) - bad_page(__FUNCTION__, page); - if (page_private(p) != (unsigned long)page) - bad_page(__FUNCTION__, page); + if (unlikely(!PageCompound(p) | + (page_private(p) != (unsigned long)page))) + bad_page(page); ClearPageCompound(p); } } @@ -255,14 +262,20 @@ __find_combined_index(unsigned long page_idx, unsigned int order) /* * This function checks whether a page is free && is the buddy * we can do coalesce a page and its buddy if - * (a) the buddy is free && - * (b) the buddy is on the buddy system && - * (c) a page and its buddy have the same order. + * (a) the buddy is not in a hole && + * (b) the buddy is free && + * (c) the buddy is on the buddy system && + * (d) a page and its buddy have the same order. * for recording page's order, we use page_private(page) and PG_private. 
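
The pairing behind these checks is plain index arithmetic: a block's buddy at a given order is found by flipping one bit of its page-frame index, and a merged pair starts at the lower of the two indices. Stand-alone helpers mirroring __page_find_buddy() and __find_combined_index() below:

    static unsigned long buddy_idx(unsigned long page_idx, unsigned int order)
    {
        return page_idx ^ (1UL << order);    /* flip bit `order` */
    }

    static unsigned long combined_idx(unsigned long page_idx,
                                      unsigned int order)
    {
        return page_idx & ~(1UL << order);   /* lower buddy of the pair */
    }

For example, at order 0 pages 6 and 7 are buddies and merge at index 6; at order 1 that block's buddy is the one starting at index 4.
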
* */ static inline int page_is_buddy(struct page *page, int order) { +#ifdef CONFIG_HOLES_IN_ZONE + if (!pfn_valid(page_to_pfn(page))) + return 0; +#endif + if (PagePrivate(page) && (page_order(page) == order) && page_count(page) == 0) @@ -300,7 +313,7 @@ static inline void __free_pages_bulk (struct page *page, unsigned long page_idx; int order_size = 1 << order; - if (unlikely(order)) + if (unlikely(PageCompound(page))) destroy_compound_page(page, order); page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); @@ -314,17 +327,15 @@ static inline void __free_pages_bulk (struct page *page, struct free_area *area; struct page *buddy; - combined_idx = __find_combined_index(page_idx, order); buddy = __page_find_buddy(page, page_idx, order); - - if (bad_range(zone, buddy)) - break; if (!page_is_buddy(buddy, order)) break; /* Move the buddy up one level. */ + list_del(&buddy->lru); area = zone->free_area + order; area->nr_free--; rmv_page_order(buddy); + combined_idx = __find_combined_index(page_idx, order); page = page + (combined_idx - page_idx); page_idx = combined_idx; order++; @@ -334,11 +345,11 @@ static inline void __free_pages_bulk (struct page *page, zone->free_area[order].nr_free++; } -static inline int free_pages_check(const char *function, struct page *page) +static inline int free_pages_check(struct page *page) { - if ( page_mapcount(page) || - page->mapping != NULL || - page_count(page) != 0 || + if (unlikely(page_mapcount(page) | + (page->mapping != NULL) | + (page_count(page) != 0) | (page->flags & ( 1 << PG_lru | 1 << PG_private | @@ -348,8 +359,8 @@ static inline int free_pages_check(const char *function, struct page *page) 1 << PG_slab | 1 << PG_swapcache | 1 << PG_writeback | - 1 << PG_reserved ))) - bad_page(function, page); + 1 << PG_reserved )))) + bad_page(page); if (PageDirty(page)) __ClearPageDirty(page); /* @@ -375,11 +386,10 @@ static int free_pages_bulk(struct zone *zone, int count, struct list_head *list, unsigned int order) { - unsigned long flags; struct page *page = NULL; int ret = 0; - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; while (!list_empty(list) && count--) { @@ -389,12 +399,13 @@ free_pages_bulk(struct zone *zone, int count, __free_pages_bulk(page, zone, order); ret++; } - spin_unlock_irqrestore(&zone->lock, flags); + spin_unlock(&zone->lock); return ret; } void __free_pages_ok(struct page *page, unsigned int order) { + unsigned long flags; LIST_HEAD(list); int i; int reserved = 0; @@ -408,14 +419,49 @@ void __free_pages_ok(struct page *page, unsigned int order) #endif for (i = 0 ; i < (1 << order) ; ++i) - reserved += free_pages_check(__FUNCTION__, page + i); + reserved += free_pages_check(page + i); if (reserved) return; list_add(&page->lru, &list); - mod_page_state(pgfree, 1 << order); kernel_map_pages(page, 1<<order, 0); + local_irq_save(flags); + __mod_page_state(pgfree, 1 << order); free_pages_bulk(page_zone(page), 1, &list, order); + local_irq_restore(flags); +} + +/* + * permit the bootmem allocator to evade page validation on high-order frees + */ +void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) +{ + if (order == 0) { + __ClearPageReserved(page); + set_page_count(page, 0); + + free_hot_cold_page(page, 0); + } else { + LIST_HEAD(list); + int loop; + + for (loop = 0; loop < BITS_PER_LONG; loop++) { + struct page *p = &page[loop]; + + if (loop + 16 < BITS_PER_LONG) + prefetchw(p + 16); + __ClearPageReserved(p); + set_page_count(p, 0); + } + + 
arch_free_page(page, order); + + mod_page_state(pgfree, 1 << order); + + list_add(&page->lru, &list); + kernel_map_pages(page, 1 << order, 0); + free_pages_bulk(page_zone(page), 1, &list, order); + } } @@ -433,8 +479,7 @@ void __free_pages_ok(struct page *page, unsigned int order) * * -- wli */ -static inline struct page * -expand(struct zone *zone, struct page *page, +static inline void expand(struct zone *zone, struct page *page, int low, int high, struct free_area *area) { unsigned long size = 1 << high; @@ -448,24 +493,6 @@ expand(struct zone *zone, struct page *page, area->nr_free++; set_page_order(&page[size], high); } - return page; -} - -void set_page_refs(struct page *page, int order) -{ -#ifdef CONFIG_MMU - set_page_count(page, 1); -#else - int i; - - /* - * We need to reference all the pages for this order, otherwise if - * anyone accesses one of the pages with (get/put) it will be freed. - * - eg: access_process_vm() - */ - for (i = 0; i < (1 << order); i++) - set_page_count(page + i, 1); -#endif /* CONFIG_MMU */ } /* @@ -473,9 +500,9 @@ void set_page_refs(struct page *page, int order) */ static int prep_new_page(struct page *page, int order) { - if ( page_mapcount(page) || - page->mapping != NULL || - page_count(page) != 0 || + if (unlikely(page_mapcount(page) | + (page->mapping != NULL) | + (page_count(page) != 0) | (page->flags & ( 1 << PG_lru | 1 << PG_private | @@ -486,8 +513,8 @@ static int prep_new_page(struct page *page, int order) 1 << PG_slab | 1 << PG_swapcache | 1 << PG_writeback | - 1 << PG_reserved ))) - bad_page(__FUNCTION__, page); + 1 << PG_reserved )))) + bad_page(page); /* * For now, we report if PG_reserved was found set, but do not @@ -525,7 +552,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order) rmv_page_order(page); area->nr_free--; zone->free_pages -= 1UL << order; - return expand(zone, page, order, current_order, area); + expand(zone, page, order, current_order, area); + return page; } return NULL; @@ -539,21 +567,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order) static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list) { - unsigned long flags; int i; - int allocated = 0; - struct page *page; - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - page = __rmqueue(zone, order); - if (page == NULL) + struct page *page = __rmqueue(zone, order); + if (unlikely(page == NULL)) break; - allocated++; list_add_tail(&page->lru, list); } - spin_unlock_irqrestore(&zone->lock, flags); - return allocated; + spin_unlock(&zone->lock); + return i; } #ifdef CONFIG_NUMA @@ -589,6 +613,7 @@ void drain_remote_pages(void) #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) static void __drain_pages(unsigned int cpu) { + unsigned long flags; struct zone *zone; int i; @@ -600,8 +625,10 @@ static void __drain_pages(unsigned int cpu) struct per_cpu_pages *pcp; pcp = &pset->pcp[i]; + local_irq_save(flags); pcp->count -= free_pages_bulk(zone, pcp->count, &pcp->list, 0); + local_irq_restore(flags); } } } @@ -647,18 +674,14 @@ void drain_local_pages(void) } #endif /* CONFIG_PM */ -static void zone_statistics(struct zonelist *zonelist, struct zone *z) +static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu) { #ifdef CONFIG_NUMA - unsigned long flags; - int cpu; pg_data_t *pg = z->zone_pgdat; pg_data_t *orig = zonelist->zones[0]->zone_pgdat; struct per_cpu_pageset *p; - local_irq_save(flags); - cpu = smp_processor_id(); 
- p = zone_pcp(z,cpu); + p = zone_pcp(z, cpu); if (pg == orig) { p->numa_hit++; } else { @@ -669,14 +692,12 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z) p->local_node++; else p->other_node++; - local_irq_restore(flags); #endif } /* * Free a 0-order page */ -static void FASTCALL(free_hot_cold_page(struct page *page, int cold)); static void fastcall free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); @@ -687,14 +708,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) if (PageAnon(page)) page->mapping = NULL; - if (free_pages_check(__FUNCTION__, page)) + if (free_pages_check(page)) return; - inc_page_state(pgfree); kernel_map_pages(page, 1, 0); pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; local_irq_save(flags); + __inc_page_state(pgfree); list_add(&page->lru, &pcp->list); pcp->count++; if (pcp->count >= pcp->high) @@ -727,49 +748,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) * we cheat by calling it from here, in the order > 0 path. Saves a branch * or two. */ -static struct page * -buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) +static struct page *buffered_rmqueue(struct zonelist *zonelist, + struct zone *zone, int order, gfp_t gfp_flags) { unsigned long flags; struct page *page; int cold = !!(gfp_flags & __GFP_COLD); + int cpu; again: + cpu = get_cpu(); if (order == 0) { struct per_cpu_pages *pcp; - page = NULL; - pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; + pcp = &zone_pcp(zone, cpu)->pcp[cold]; local_irq_save(flags); - if (pcp->count <= pcp->low) + if (!pcp->count) { pcp->count += rmqueue_bulk(zone, 0, pcp->batch, &pcp->list); - if (pcp->count) { - page = list_entry(pcp->list.next, struct page, lru); - list_del(&page->lru); - pcp->count--; + if (unlikely(!pcp->count)) + goto failed; } - local_irq_restore(flags); - put_cpu(); + page = list_entry(pcp->list.next, struct page, lru); + list_del(&page->lru); + pcp->count--; } else { spin_lock_irqsave(&zone->lock, flags); page = __rmqueue(zone, order); - spin_unlock_irqrestore(&zone->lock, flags); + spin_unlock(&zone->lock); + if (!page) + goto failed; } - if (page != NULL) { - BUG_ON(bad_range(zone, page)); - mod_page_state_zone(zone, pgalloc, 1 << order); - if (prep_new_page(page, order)) - goto again; + __mod_page_state_zone(zone, pgalloc, 1 << order); + zone_statistics(zonelist, zone, cpu); + local_irq_restore(flags); + put_cpu(); + + BUG_ON(bad_range(zone, page)); + if (prep_new_page(page, order)) + goto again; - if (gfp_flags & __GFP_ZERO) - prep_zero_page(page, order, gfp_flags); + if (gfp_flags & __GFP_ZERO) + prep_zero_page(page, order, gfp_flags); - if (order && (gfp_flags & __GFP_COMP)) - prep_compound_page(page, order); - } + if (order && (gfp_flags & __GFP_COMP)) + prep_compound_page(page, order); return page; + +failed: + local_irq_restore(flags); + put_cpu(); + return NULL; } #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ @@ -845,9 +875,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, continue; } - page = buffered_rmqueue(*z, order, gfp_mask); + page = buffered_rmqueue(zonelist, *z, order, gfp_mask); if (page) { - zone_statistics(zonelist, *z); break; } } while (*(++z) != NULL); @@ -903,8 +932,7 @@ restart: alloc_flags |= ALLOC_HARDER; if (gfp_mask & __GFP_HIGH) alloc_flags |= ALLOC_HIGH; - if (wait) - alloc_flags |= ALLOC_CPUSET; + alloc_flags |= ALLOC_CPUSET; /* * Go through the zonelist again. 
Let __GFP_HIGH and allocations @@ -926,7 +954,7 @@ restart: nofail_alloc: /* go through the zonelist yet again, ignoring mins */ page = get_page_from_freelist(gfp_mask, order, - zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET); + zonelist, ALLOC_NO_WATERMARKS); if (page) goto got_pg; if (gfp_mask & __GFP_NOFAIL) { @@ -1171,12 +1199,11 @@ EXPORT_SYMBOL(nr_pagecache); DEFINE_PER_CPU(long, nr_pagecache_local) = 0; #endif -void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) +static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) { int cpu = 0; memset(ret, 0, sizeof(*ret)); - cpus_and(*cpumask, *cpumask, cpu_online_map); cpu = first_cpu(*cpumask); while (cpu < NR_CPUS) { @@ -1224,12 +1251,12 @@ void get_full_page_state(struct page_state *ret) __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask); } -unsigned long __read_page_state(unsigned long offset) +unsigned long read_page_state_offset(unsigned long offset) { unsigned long ret = 0; int cpu; - for_each_online_cpu(cpu) { + for_each_cpu(cpu) { unsigned long in; in = (unsigned long)&per_cpu(page_states, cpu) + offset; @@ -1238,18 +1265,26 @@ unsigned long __read_page_state(unsigned long offset) return ret; } -void __mod_page_state(unsigned long offset, unsigned long delta) +void __mod_page_state_offset(unsigned long offset, unsigned long delta) +{ + void *ptr; + + ptr = &__get_cpu_var(page_states); + *(unsigned long *)(ptr + offset) += delta; +} +EXPORT_SYMBOL(__mod_page_state_offset); + +void mod_page_state_offset(unsigned long offset, unsigned long delta) { unsigned long flags; - void* ptr; + void *ptr; local_irq_save(flags); ptr = &__get_cpu_var(page_states); - *(unsigned long*)(ptr + offset) += delta; + *(unsigned long *)(ptr + offset) += delta; local_irq_restore(flags); } - -EXPORT_SYMBOL(__mod_page_state); +EXPORT_SYMBOL(mod_page_state_offset); void __get_zone_counts(unsigned long *active, unsigned long *inactive, unsigned long *free, struct pglist_data *pgdat) @@ -1335,7 +1370,7 @@ void show_free_areas(void) show_node(zone); printk("%s per-cpu:", zone->name); - if (!zone->present_pages) { + if (!populated_zone(zone)) { printk(" empty\n"); continue; } else @@ -1347,10 +1382,9 @@ void show_free_areas(void) pageset = zone_pcp(zone, cpu); for (temperature = 0; temperature < 2; temperature++) - printk("cpu %d %s: low %d, high %d, batch %d used:%d\n", + printk("cpu %d %s: high %d, batch %d used:%d\n", cpu, temperature ? "cold" : "hot", - pageset->pcp[temperature].low, pageset->pcp[temperature].high, pageset->pcp[temperature].batch, pageset->pcp[temperature].count); @@ -1413,7 +1447,7 @@ void show_free_areas(void) show_node(zone); printk("%s: ", zone->name); - if (!zone->present_pages) { + if (!populated_zone(zone)) { printk("empty\n"); continue; } @@ -1433,36 +1467,29 @@ void show_free_areas(void) /* * Builds allocation fallback zone lists. + * + * Add all populated zones of a node to the zonelist. 
*/ -static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k) -{ - switch (k) { - struct zone *zone; - default: - BUG(); - case ZONE_HIGHMEM: - zone = pgdat->node_zones + ZONE_HIGHMEM; - if (zone->present_pages) { +static int __init build_zonelists_node(pg_data_t *pgdat, + struct zonelist *zonelist, int nr_zones, int zone_type) +{ + struct zone *zone; + + BUG_ON(zone_type > ZONE_HIGHMEM); + + do { + zone = pgdat->node_zones + zone_type; + if (populated_zone(zone)) { #ifndef CONFIG_HIGHMEM - BUG(); + BUG_ON(zone_type > ZONE_NORMAL); #endif - zonelist->zones[j++] = zone; + zonelist->zones[nr_zones++] = zone; + check_highest_zone(zone_type); } - case ZONE_NORMAL: - zone = pgdat->node_zones + ZONE_NORMAL; - if (zone->present_pages) - zonelist->zones[j++] = zone; - case ZONE_DMA32: - zone = pgdat->node_zones + ZONE_DMA32; - if (zone->present_pages) - zonelist->zones[j++] = zone; - case ZONE_DMA: - zone = pgdat->node_zones + ZONE_DMA; - if (zone->present_pages) - zonelist->zones[j++] = zone; - } + zone_type--; - return j; + } while (zone_type >= 0); + return nr_zones; } static inline int highest_zone(int zone_bits) @@ -1709,8 +1736,6 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone, for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) { if (!early_pfn_valid(pfn)) continue; - if (!early_pfn_in_nid(pfn, nid)) - continue; page = pfn_to_page(pfn); set_page_links(page, zone, nid, pfn); set_page_count(page, 1); @@ -1794,14 +1819,12 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) pcp = &p->pcp[0]; /* hot */ pcp->count = 0; - pcp->low = 0; pcp->high = 6 * batch; pcp->batch = max(1UL, 1 * batch); INIT_LIST_HEAD(&pcp->list); pcp = &p->pcp[1]; /* cold*/ pcp->count = 0; - pcp->low = 0; pcp->high = 2 * batch; pcp->batch = max(1UL, batch/2); INIT_LIST_HEAD(&pcp->list); @@ -2116,7 +2139,7 @@ static int frag_show(struct seq_file *m, void *arg) int order; for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { - if (!zone->present_pages) + if (!populated_zone(zone)) continue; spin_lock_irqsave(&zone->lock, flags); @@ -2149,7 +2172,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg) for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) { int i; - if (!zone->present_pages) + if (!populated_zone(zone)) continue; spin_lock_irqsave(&zone->lock, flags); @@ -2197,12 +2220,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg) seq_printf(m, "\n cpu: %i pcp: %i" "\n count: %i" - "\n low: %i" "\n high: %i" "\n batch: %i", i, j, pageset->pcp[j].count, - pageset->pcp[j].low, pageset->pcp[j].high, pageset->pcp[j].batch); } @@ -2257,32 +2278,40 @@ static char *vmstat_text[] = { "pgpgout", "pswpin", "pswpout", - "pgalloc_high", + "pgalloc_high", "pgalloc_normal", + "pgalloc_dma32", "pgalloc_dma", + "pgfree", "pgactivate", "pgdeactivate", "pgfault", "pgmajfault", + "pgrefill_high", "pgrefill_normal", + "pgrefill_dma32", "pgrefill_dma", "pgsteal_high", "pgsteal_normal", + "pgsteal_dma32", "pgsteal_dma", + "pgscan_kswapd_high", "pgscan_kswapd_normal", - + "pgscan_kswapd_dma32", "pgscan_kswapd_dma", + "pgscan_direct_high", "pgscan_direct_normal", + "pgscan_direct_dma32", "pgscan_direct_dma", - "pginodesteal", + "pginodesteal", "slabs_scanned", "kswapd_steal", "kswapd_inodesteal", @@ -435,6 +435,30 @@ int page_referenced(struct page *page, int is_locked) } /** + * page_set_anon_rmap - setup new anonymous rmap + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping 
is added + * @address: the user virtual address mapped + */ +static void __page_set_anon_rmap(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + struct anon_vma *anon_vma = vma->anon_vma; + + BUG_ON(!anon_vma); + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; + page->mapping = (struct address_space *) anon_vma; + + page->index = linear_page_index(vma, address); + + /* + * nr_mapped state can be updated without turning off + * interrupts because it is not modified via interrupt. + */ + __inc_page_state(nr_mapped); +} + +/** * page_add_anon_rmap - add pte mapping to an anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added @@ -445,20 +469,27 @@ int page_referenced(struct page *page, int is_locked) void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - if (atomic_inc_and_test(&page->_mapcount)) { - struct anon_vma *anon_vma = vma->anon_vma; - - BUG_ON(!anon_vma); - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - page->mapping = (struct address_space *) anon_vma; - - page->index = linear_page_index(vma, address); - - inc_page_state(nr_mapped); - } + if (atomic_inc_and_test(&page->_mapcount)) + __page_set_anon_rmap(page, vma, address); /* else checking page index and mapping is racy */ } +/* + * page_add_new_anon_rmap - add pte mapping to a new anonymous page + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping is added + * @address: the user virtual address mapped + * + * Same as page_add_anon_rmap but must only be called on *new* pages. + * This means the inc-and-test can be bypassed. + */ +void page_add_new_anon_rmap(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ + __page_set_anon_rmap(page, vma, address); +} + /** * page_add_file_rmap - add pte mapping to a file page * @page: the page to add the mapping to @@ -471,7 +502,7 @@ void page_add_file_rmap(struct page *page) BUG_ON(!pfn_valid(page_to_pfn(page))); if (atomic_inc_and_test(&page->_mapcount)) - inc_page_state(nr_mapped); + __inc_page_state(nr_mapped); } /** @@ -495,7 +526,7 @@ void page_remove_rmap(struct page *page) */ if (page_test_and_clear_dirty(page)) set_page_dirty(page); - dec_page_state(nr_mapped); + __dec_page_state(nr_mapped); } } @@ -457,7 +457,7 @@ static void shmem_free_pages(struct list_head *next) } while (next); } -static void shmem_truncate(struct inode *inode) +static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) { struct shmem_inode_info *info = SHMEM_I(inode); unsigned long idx; @@ -475,18 +475,27 @@ static void shmem_truncate(struct inode *inode) long nr_swaps_freed = 0; int offset; int freed; + int punch_hole = 0; inode->i_ctime = inode->i_mtime = CURRENT_TIME; - idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (idx >= info->next_index) return; spin_lock(&info->lock); info->flags |= SHMEM_TRUNCATE; - limit = info->next_index; - info->next_index = idx; + if (likely(end == (loff_t) -1)) { + limit = info->next_index; + info->next_index = idx; + } else { + limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (limit > info->next_index) + limit = info->next_index; + punch_hole = 1; + } + topdir = info->i_indirect; - if (topdir && idx <= SHMEM_NR_DIRECT) { + if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) { info->i_indirect = NULL; 
nr_pages_to_free++; list_add(&topdir->lru, &pages_to_free); @@ -573,11 +582,12 @@ static void shmem_truncate(struct inode *inode) set_page_private(subdir, page_private(subdir) - freed); if (offset) spin_unlock(&info->lock); - BUG_ON(page_private(subdir) > offset); + if (!punch_hole) + BUG_ON(page_private(subdir) > offset); } if (offset) offset = 0; - else if (subdir) { + else if (subdir && !page_private(subdir)) { dir[diroff] = NULL; nr_pages_to_free++; list_add(&subdir->lru, &pages_to_free); @@ -594,7 +604,7 @@ done2: * Also, though shmem_getpage checks i_size before adding to * cache, no recheck after: so fix the narrow window there too. */ - truncate_inode_pages(inode->i_mapping, inode->i_size); + truncate_inode_pages_range(inode->i_mapping, start, end); } spin_lock(&info->lock); @@ -614,6 +624,11 @@ done2: } } +static void shmem_truncate(struct inode *inode) +{ + shmem_truncate_range(inode, inode->i_size, (loff_t)-1); +} + static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; @@ -1255,7 +1270,7 @@ out_nomem: return retval; } -static int shmem_mmap(struct file *file, struct vm_area_struct *vma) +int shmem_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &shmem_vm_ops; @@ -2083,6 +2098,7 @@ static struct file_operations shmem_file_operations = { static struct inode_operations shmem_inode_operations = { .truncate = shmem_truncate, .setattr = shmem_notify_change, + .truncate_range = shmem_truncate_range, }; static struct inode_operations shmem_dir_inode_operations = { @@ -156,16 +156,22 @@ void fastcall lru_cache_add_active(struct page *page) put_cpu_var(lru_add_active_pvecs); } -void lru_add_drain(void) +static void __lru_add_drain(int cpu) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); + struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); + /* CPU is dead, so no locking needed. */ if (pagevec_count(pvec)) __pagevec_lru_add(pvec); - pvec = &__get_cpu_var(lru_add_active_pvecs); + pvec = &per_cpu(lru_add_active_pvecs, cpu); if (pagevec_count(pvec)) __pagevec_lru_add_active(pvec); - put_cpu_var(lru_add_pvecs); +} + +void lru_add_drain(void) +{ + __lru_add_drain(get_cpu()); + put_cpu(); } /* @@ -412,17 +418,6 @@ void vm_acct_memory(long pages) } #ifdef CONFIG_HOTPLUG_CPU -static void lru_drain_cache(unsigned int cpu) -{ - struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); - - /* CPU is dead, so no locking needed. */ - if (pagevec_count(pvec)) - __pagevec_lru_add(pvec); - pvec = &per_cpu(lru_add_active_pvecs, cpu); - if (pagevec_count(pvec)) - __pagevec_lru_add_active(pvec); -} /* Drop the CPU's cached committed space back into the central pool. 
*/ static int cpu_swap_callback(struct notifier_block *nfb, @@ -435,7 +430,7 @@ static int cpu_swap_callback(struct notifier_block *nfb, if (action == CPU_DEAD) { atomic_add(*committed, &vm_committed_space); *committed = 0; - lru_drain_cache((long)hcpu); + __lru_add_drain((long)hcpu); } return NOTIFY_OK; } diff --git a/mm/swap_state.c b/mm/swap_state.c index 0df9a57..fc2aecb 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -14,6 +14,7 @@ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/backing-dev.h> +#include <linux/pagevec.h> #include <asm/pgtable.h> @@ -272,12 +273,11 @@ void free_page_and_swap_cache(struct page *page) */ void free_pages_and_swap_cache(struct page **pages, int nr) { - int chunk = 16; struct page **pagep = pages; lru_add_drain(); while (nr) { - int todo = min(chunk, nr); + int todo = min(nr, PAGEVEC_SIZE); int i; for (i = 0; i < todo; i++) diff --git a/mm/swapfile.c b/mm/swapfile.c index edafeac..6da4b28 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -211,6 +211,26 @@ noswap: return (swp_entry_t) {0}; } +swp_entry_t get_swap_page_of_type(int type) +{ + struct swap_info_struct *si; + pgoff_t offset; + + spin_lock(&swap_lock); + si = swap_info + type; + if (si->flags & SWP_WRITEOK) { + nr_swap_pages--; + offset = scan_swap_map(si); + if (offset) { + spin_unlock(&swap_lock); + return swp_entry(type, offset); + } + nr_swap_pages++; + } + spin_unlock(&swap_lock); + return (swp_entry_t) {0}; +} + static struct swap_info_struct * swap_info_get(swp_entry_t entry) { struct swap_info_struct * p; diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c index b58abcf..cdc6d43 100644 --- a/mm/tiny-shmem.c +++ b/mm/tiny-shmem.c @@ -81,13 +81,19 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) goto close_file; d_instantiate(dentry, inode); - inode->i_size = size; inode->i_nlink = 0; /* It is unlinked */ + file->f_vfsmnt = mntget(shm_mnt); file->f_dentry = dentry; file->f_mapping = inode->i_mapping; file->f_op = &ramfs_file_operations; file->f_mode = FMODE_WRITE | FMODE_READ; + + /* notify everyone as to the change of file size */ + error = do_truncate(dentry, size, file); + if (error < 0) + goto close_file; + return file; close_file: @@ -123,3 +129,24 @@ int shmem_unuse(swp_entry_t entry, struct page *page) { return 0; } + +int shmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + file_accessed(file); +#ifndef CONFIG_MMU + return ramfs_nommu_mmap(file, vma); +#else + return 0; +#endif +} + +#ifndef CONFIG_MMU +unsigned long shmem_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + return ramfs_nommu_get_unmapped_area(file, addr, len, pgoff, flags); +} +#endif diff --git a/mm/truncate.c b/mm/truncate.c index 9173ab5..7dee327 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -82,12 +82,15 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) } /** - * truncate_inode_pages - truncate *all* the pages from an offset + * truncate_inode_pages - truncate range of pages specified by start and + * end byte offsets * @mapping: mapping to truncate * @lstart: offset from which to truncate + * @lend: offset to which to truncate * - * Truncate the page cache at a set offset, removing the pages that are beyond - * that offset (and zeroing out partial pages). + * Truncate the page cache, removing the pages that are between + * specified offsets (and zeroing out partial page + * (if lstart is not page aligned)). 
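
The range variant below removes only pages that lie wholly inside [lstart, lend]; a partial first page is zeroed from the offset onward rather than freed, and lend must address the last byte of a page (hence the BUG_ON in the body). The index math, worked as a small program under an assumed 4096-byte PAGE_CACHE_SIZE:

    #include <stdio.h>

    #define PSIZE  4096ULL
    #define PSHIFT 12

    int main(void)
    {
        unsigned long long lstart = 6000;           /* mid page 1 */
        unsigned long long lend   = 4 * PSIZE - 1;  /* last byte of page 3 */

        unsigned long long start   = (lstart + PSIZE - 1) >> PSHIFT; /* 2 */
        unsigned long long partial = lstart & (PSIZE - 1);        /* 1904 */
        unsigned long long end     = lend >> PSHIFT;                 /* 3 */

        printf("remove pages [%llu..%llu], zero page %llu from byte %llu\n",
               start, end, start - 1, partial);
        return 0;
    }
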
* * Truncate takes two passes - the first pass is nonblocking. It will not * block on page locks and it will not block on writeback. The second pass @@ -101,12 +104,12 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) * We pass down the cache-hot hint to the page freeing code. Even if the * mapping is large, it is probably the case that the final pages are the most * recently touched, and freeing happens in ascending file offset order. - * - * Called under (and serialised by) inode->i_sem. */ -void truncate_inode_pages(struct address_space *mapping, loff_t lstart) +void truncate_inode_pages_range(struct address_space *mapping, + loff_t lstart, loff_t lend) { const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; + pgoff_t end; const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); struct pagevec pvec; pgoff_t next; @@ -115,13 +118,22 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) if (mapping->nrpages == 0) return; + BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); + end = (lend >> PAGE_CACHE_SHIFT); + pagevec_init(&pvec, 0); next = start; - while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + while (next <= end && + pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; pgoff_t page_index = page->index; + if (page_index > end) { + next = page_index; + break; + } + if (page_index > next) next = page_index; next++; @@ -157,9 +169,15 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) next = start; continue; } + if (pvec.pages[0]->index > end) { + pagevec_release(&pvec); + break; + } for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; + if (page->index > end) + break; lock_page(page); wait_on_page_writeback(page); if (page->index > next) @@ -171,7 +189,19 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) pagevec_release(&pvec); } } +EXPORT_SYMBOL(truncate_inode_pages_range); +/** + * truncate_inode_pages - truncate *all* the pages from an offset + * @mapping: mapping to truncate + * @lstart: offset from which to truncate + * + * Called under (and serialised by) inode->i_sem. + */ +void truncate_inode_pages(struct address_space *mapping, loff_t lstart) +{ + truncate_inode_pages_range(mapping, lstart, (loff_t)-1); +} EXPORT_SYMBOL(truncate_inode_pages); /** diff --git a/mm/vmscan.c b/mm/vmscan.c index 795a050..be8235f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -63,9 +63,6 @@ struct scan_control { unsigned long nr_mapped; /* From page_state */ - /* How many pages shrink_cache() should reclaim */ - int nr_to_reclaim; - /* Ask shrink_caches, or shrink_zone to scan at this priority */ unsigned int priority; @@ -74,9 +71,6 @@ struct scan_control { int may_writepage; - /* Can pages be swapped as part of reclaim? */ - int may_swap; - /* This context's SWAP_CLUSTER_MAX. If freeing memory for * suspend, we effectively ignore SWAP_CLUSTER_MAX. * In this context, it doesn't matter that we scan the @@ -430,8 +424,6 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) * Try to allocate it some swap space here. 
*/ if (PageAnon(page) && !PageSwapCache(page)) { - if (!sc->may_swap) - goto keep_locked; if (!add_to_swap(page)) goto activate_locked; } @@ -653,17 +645,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc) goto done; max_scan -= nr_scan; - if (current_is_kswapd()) - mod_page_state_zone(zone, pgscan_kswapd, nr_scan); - else - mod_page_state_zone(zone, pgscan_direct, nr_scan); nr_freed = shrink_list(&page_list, sc); - if (current_is_kswapd()) - mod_page_state(kswapd_steal, nr_freed); - mod_page_state_zone(zone, pgsteal, nr_freed); - sc->nr_to_reclaim -= nr_freed; - spin_lock_irq(&zone->lru_lock); + local_irq_disable(); + if (current_is_kswapd()) { + __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); + __mod_page_state(kswapd_steal, nr_freed); + } else + __mod_page_state_zone(zone, pgscan_direct, nr_scan); + __mod_page_state_zone(zone, pgsteal, nr_freed); + + spin_lock(&zone->lru_lock); /* * Put back any unfreeable pages. */ @@ -825,11 +817,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) } } zone->nr_active += pgmoved; - spin_unlock_irq(&zone->lru_lock); - pagevec_release(&pvec); + spin_unlock(&zone->lru_lock); + + __mod_page_state_zone(zone, pgrefill, pgscanned); + __mod_page_state(pgdeactivate, pgdeactivate); + local_irq_enable(); - mod_page_state_zone(zone, pgrefill, pgscanned); - mod_page_state(pgdeactivate, pgdeactivate); + pagevec_release(&pvec); } /* @@ -861,8 +855,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc) else nr_inactive = 0; - sc->nr_to_reclaim = sc->swap_cluster_max; - while (nr_active || nr_inactive) { if (nr_active) { sc->nr_to_scan = min(nr_active, @@ -876,8 +868,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc) (unsigned long)sc->swap_cluster_max); nr_inactive -= sc->nr_to_scan; shrink_cache(zone, sc); - if (sc->nr_to_reclaim <= 0) - break; } } @@ -910,7 +900,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc) for (i = 0; zones[i] != NULL; i++) { struct zone *zone = zones[i]; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) @@ -952,7 +942,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) sc.gfp_mask = gfp_mask; sc.may_writepage = 0; - sc.may_swap = 1; inc_page_state(allocstall); @@ -1055,7 +1044,6 @@ loop_again: total_reclaimed = 0; sc.gfp_mask = GFP_KERNEL; sc.may_writepage = 0; - sc.may_swap = 1; sc.nr_mapped = read_page_state(nr_mapped); inc_page_state(pageoutrun); @@ -1084,7 +1072,7 @@ loop_again: for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && @@ -1121,7 +1109,7 @@ scan: struct zone *zone = pgdat->node_zones + i; int nr_slab; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && priority != DEF_PRIORITY) @@ -1273,7 +1261,7 @@ void wakeup_kswapd(struct zone *zone, int order) { pg_data_t *pgdat; - if (zone->present_pages == 0) + if (!populated_zone(zone)) return; pgdat = zone->zone_pgdat; @@ -1353,76 +1341,3 @@ static int __init kswapd_init(void) } module_init(kswapd_init) - - -/* - * Try to free up some pages from this zone through reclaim. 
- */ -int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) -{ - struct scan_control sc; - int nr_pages = 1 << order; - int total_reclaimed = 0; - - /* The reclaim may sleep, so don't do it if sleep isn't allowed */ - if (!(gfp_mask & __GFP_WAIT)) - return 0; - if (zone->all_unreclaimable) - return 0; - - sc.gfp_mask = gfp_mask; - sc.may_writepage = 0; - sc.may_swap = 0; - sc.nr_mapped = read_page_state(nr_mapped); - sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - /* scan at the highest priority */ - sc.priority = 0; - disable_swap_token(); - - if (nr_pages > SWAP_CLUSTER_MAX) - sc.swap_cluster_max = nr_pages; - else - sc.swap_cluster_max = SWAP_CLUSTER_MAX; - - /* Don't reclaim the zone if there are other reclaimers active */ - if (atomic_read(&zone->reclaim_in_progress) > 0) - goto out; - - shrink_zone(zone, &sc); - total_reclaimed = sc.nr_reclaimed; - - out: - return total_reclaimed; -} - -asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone, - unsigned int state) -{ - struct zone *z; - int i; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (node >= MAX_NUMNODES || !node_online(node)) - return -EINVAL; - - /* This will break if we ever add more zones */ - if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM))) - return -EINVAL; - - for (i = 0; i < MAX_NR_ZONES; i++) { - if (!(zone & 1<<i)) - continue; - - z = &NODE_DATA(node)->node_zones[i]; - - if (state) - z->reclaim_pages = 1; - else - z->reclaim_pages = 0; - } - - return 0; -} diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index cac2e77..3e6c694 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -101,10 +101,22 @@ static void ip_map_put(struct cache_head *item, struct cache_detail *cd) } } +#if IP_HASHBITS == 8 +/* hash_long on a 64 bit machine is currently REALLY BAD for + * IP addresses in reverse-endian (i.e. on a little-endian machine). + * So use a trivial but reliable hash instead + */ +static inline int hash_ip(unsigned long ip) +{ + int hash = ip ^ (ip>>16); + return (hash ^ (hash>>8)) & 0xff; +} +#endif + static inline int ip_map_hash(struct ip_map *item) { return hash_str(item->m_class, IP_HASHBITS) ^ - hash_long((unsigned long)item->m_addr.s_addr, IP_HASHBITS); + hash_ip((unsigned long)item->m_addr.s_addr); } static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) { diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index d68eba4..e67613e 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1026,7 +1026,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp) } else { printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", svsk->sk_server->sv_name, -len); - svc_sock_received(svsk); + goto err_delete; } return len; diff --git a/security/keys/internal.h b/security/keys/internal.h index db99ed4..39cba97 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -25,7 +25,6 @@ #define kdebug(FMT, a...) 
do {} while(0) #endif -extern struct key_type key_type_dead; extern struct key_type key_type_user; /*****************************************************************************/ diff --git a/security/keys/key.c b/security/keys/key.c index 01bcfec..99781b7 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -36,7 +36,7 @@ static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); DECLARE_RWSEM(key_construction_sem); /* any key whose type gets unregistered will be re-typed to this */ -struct key_type key_type_dead = { +static struct key_type key_type_dead = { .name = "dead", }; @@ -240,9 +240,9 @@ static inline void key_alloc_serial(struct key *key) /* * allocate a key of the specified type * - update the user's quota to reflect the existence of the key - * - called from a key-type operation with key_types_sem read-locked by either - * key_create_or_update() or by key_duplicate(); this prevents unregistration - * of the key type + * - called from a key-type operation with key_types_sem read-locked by + * key_create_or_update() + * - this prevents unregistration of the key type * - upon return the key is as yet uninstantiated; the caller needs to either * instantiate the key or discard it before returning */ @@ -889,56 +889,6 @@ EXPORT_SYMBOL(key_update); /*****************************************************************************/ /* - * duplicate a key, potentially with a revised description - * - must be supported by the keytype (keyrings for instance can be duplicated) - */ -struct key *key_duplicate(struct key *source, const char *desc) -{ - struct key *key; - int ret; - - key_check(source); - - if (!desc) - desc = source->description; - - down_read(&key_types_sem); - - ret = -EINVAL; - if (!source->type->duplicate) - goto error; - - /* allocate and instantiate a key */ - key = key_alloc(source->type, desc, current->fsuid, current->fsgid, - source->perm, 0); - if (IS_ERR(key)) - goto error_k; - - down_read(&source->sem); - ret = key->type->duplicate(key, source); - up_read(&source->sem); - if (ret < 0) - goto error2; - - atomic_inc(&key->user->nikeys); - set_bit(KEY_FLAG_INSTANTIATED, &key->flags); - - error_k: - up_read(&key_types_sem); - out: - return key; - - error2: - key_put(key); - error: - up_read(&key_types_sem); - key = ERR_PTR(ret); - goto out; - -} /* end key_duplicate() */ - -/*****************************************************************************/ -/* * revoke a key */ void key_revoke(struct key *key) diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 4e9fa8b..5d22c03 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -48,7 +48,6 @@ static inline unsigned keyring_hash(const char *desc) */ static int keyring_instantiate(struct key *keyring, const void *data, size_t datalen); -static int keyring_duplicate(struct key *keyring, const struct key *source); static int keyring_match(const struct key *keyring, const void *criterion); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); @@ -59,7 +58,6 @@ struct key_type key_type_keyring = { .name = "keyring", .def_datalen = sizeof(struct keyring_list), .instantiate = keyring_instantiate, - .duplicate = keyring_duplicate, .match = keyring_match, .destroy = keyring_destroy, .describe = keyring_describe, @@ -70,7 +68,7 @@ struct key_type key_type_keyring = { * semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle */ -DECLARE_RWSEM(keyring_serialise_link_sem); +static
DECLARE_RWSEM(keyring_serialise_link_sem); /*****************************************************************************/ /* @@ -120,68 +118,6 @@ static int keyring_instantiate(struct key *keyring, /*****************************************************************************/ /* - * duplicate the list of subscribed keys from a source keyring into this one - */ -static int keyring_duplicate(struct key *keyring, const struct key *source) -{ - struct keyring_list *sklist, *klist; - unsigned max; - size_t size; - int loop, ret; - - const unsigned limit = - (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key *); - - ret = 0; - - /* find out how many keys are currently linked */ - rcu_read_lock(); - sklist = rcu_dereference(source->payload.subscriptions); - max = 0; - if (sklist) - max = sklist->nkeys; - rcu_read_unlock(); - - /* allocate a new payload and stuff load with key links */ - if (max > 0) { - BUG_ON(max > limit); - - max = (max + 3) & ~3; - if (max > limit) - max = limit; - - ret = -ENOMEM; - size = sizeof(*klist) + sizeof(struct key *) * max; - klist = kmalloc(size, GFP_KERNEL); - if (!klist) - goto error; - - /* set links */ - rcu_read_lock(); - sklist = rcu_dereference(source->payload.subscriptions); - - klist->maxkeys = max; - klist->nkeys = sklist->nkeys; - memcpy(klist->keys, - sklist->keys, - sklist->nkeys * sizeof(struct key *)); - - for (loop = klist->nkeys - 1; loop >= 0; loop--) - atomic_inc(&klist->keys[loop]->usage); - - rcu_read_unlock(); - - rcu_assign_pointer(keyring->payload.subscriptions, klist); - ret = 0; - } - - error: - return ret; - -} /* end keyring_duplicate() */ - -/*****************************************************************************/ -/* * match keyrings on their name */ static int keyring_match(const struct key *keyring, const void *description) diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index cbda3b2..8e71895 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -26,7 +26,6 @@ struct key_type key_type_user = { .name = "user", .instantiate = user_instantiate, - .duplicate = user_duplicate, .update = user_update, .match = user_match, .destroy = user_destroy, @@ -68,42 +67,10 @@ error: return ret; } /* end user_instantiate() */ - EXPORT_SYMBOL_GPL(user_instantiate); /*****************************************************************************/ /* - * duplicate a user defined key - * - both keys' semaphores are locked against further modification - * - the new key cannot yet be accessed - */ -int user_duplicate(struct key *key, const struct key *source) -{ - struct user_key_payload *upayload, *spayload; - int ret; - - /* just copy the payload */ - ret = -ENOMEM; - upayload = kmalloc(sizeof(*upayload) + source->datalen, GFP_KERNEL); - if (upayload) { - spayload = rcu_dereference(source->payload.data); - BUG_ON(source->datalen != spayload->datalen); - - upayload->datalen = key->datalen = spayload->datalen; - memcpy(upayload->data, spayload->data, key->datalen); - - key->payload.data = upayload; - ret = 0; - } - - return ret; - -} /* end user_duplicate() */ - -EXPORT_SYMBOL_GPL(user_duplicate); - -/*****************************************************************************/ -/* * dispose of the old data from an updated user defined key */ static void user_update_rcu_disposal(struct rcu_head *rcu) diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 0e1352a..e59da63 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -376,7 +376,7 @@ static 
ssize_t selinux_transaction_write(struct file *file, const char __user *b char *data; ssize_t rv; - if (ino >= sizeof(write_op)/sizeof(write_op[0]) || !write_op[ino]) + if (ino >= ARRAY_SIZE(write_op) || !write_op[ino]) return -EINVAL; data = simple_transaction_get(file, buf, size); @@ -1161,7 +1161,7 @@ static int sel_make_avc_files(struct dentry *dir) #endif }; - for (i = 0; i < sizeof (files) / sizeof (files[0]); i++) { + for (i = 0; i < ARRAY_SIZE(files); i++) { struct inode *inode; struct dentry *dentry; diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c index dde094f..d049c7a 100644 --- a/security/selinux/ss/avtab.c +++ b/security/selinux/ss/avtab.c @@ -359,7 +359,7 @@ int avtab_read_item(void *fp, u32 vers, struct avtab *a, return -1; } - for (i = 0; i < sizeof(spec_order)/sizeof(u16); i++) { + for (i = 0; i < ARRAY_SIZE(spec_order); i++) { if (val & spec_order[i]) { key.specified = spec_order[i] | enabled; datum.data = le32_to_cpu(buf32[items++]); diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 0ac311d..0111990 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -103,7 +103,7 @@ static struct policydb_compat_info *policydb_lookup_compat(int version) int i; struct policydb_compat_info *info = NULL; - for (i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++) { + for (i = 0; i < ARRAY_SIZE(policydb_compat); i++) { if (policydb_compat[i].version == version) { info = &policydb_compat[i]; break; diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c index 3f30c57..49796be 100644 --- a/sound/oss/ad1848.c +++ b/sound/oss/ad1848.c @@ -46,8 +46,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/stddef.h> -#include <linux/pm.h> -#include <linux/pm_legacy.h> #include <linux/isapnp.h> #include <linux/pnp.h> #include <linux/spinlock.h> @@ -105,9 +103,6 @@ typedef struct int irq_ok; mixer_ents *mix_devices; int mixer_output_port; - - /* Power management */ - struct pm_dev *pmdev; } ad1848_info; typedef struct ad1848_port_info @@ -201,7 +196,6 @@ static void ad1848_halt(int dev); static void ad1848_halt_input(int dev); static void ad1848_halt_output(int dev); static void ad1848_trigger(int dev, int bits); -static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data); #ifndef EXCLUDE_TIMERS static int ad1848_tmr_install(int dev); @@ -2027,10 +2021,6 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback, nr_ad1848_devs++; - devc->pmdev = pm_register(PM_ISA_DEV, my_dev, ad1848_pm_callback); - if (devc->pmdev) - devc->pmdev->data = devc; - ad1848_init_hw(devc); if (irq > 0) @@ -2197,9 +2187,6 @@ void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int if(mixer>=0) sound_unload_mixerdev(mixer); - if (devc->pmdev) - pm_unregister(devc->pmdev); - nr_ad1848_devs--; for ( ; i < nr_ad1848_devs ; i++) adev_info[i] = adev_info[i+1]; @@ -2811,85 +2798,6 @@ static int ad1848_tmr_install(int dev) } #endif /* EXCLUDE_TIMERS */ -static int ad1848_suspend(ad1848_info *devc) -{ - unsigned long flags; - - spin_lock_irqsave(&devc->lock,flags); - - ad_mute(devc); - - spin_unlock_irqrestore(&devc->lock,flags); - return 0; -} - -static int ad1848_resume(ad1848_info *devc) -{ - int mixer_levels[32], i; - - /* Thinkpad is a bit more of PITA than normal. The BIOS tends to - restore it in a different config to the one we use. 
Need to - fix this somehow */ - - /* store old mixer levels */ - memcpy(mixer_levels, devc->levels, sizeof (mixer_levels)); - ad1848_init_hw(devc); - - /* restore mixer levels */ - for (i = 0; i < 32; i++) - ad1848_mixer_set(devc, devc->dev_no, mixer_levels[i]); - - if (!devc->subtype) { - static signed char interrupt_bits[12] = { -1, -1, -1, -1, -1, 0x00, -1, 0x08, -1, 0x10, 0x18, 0x20 }; - static char dma_bits[4] = { 1, 2, 0, 3 }; - unsigned long flags; - signed char bits; - char dma2_bit = 0; - - int config_port = devc->base + 0; - - bits = interrupt_bits[devc->irq]; - if (bits == -1) { - printk(KERN_ERR "MSS: Bad IRQ %d\n", devc->irq); - return -1; - } - - spin_lock_irqsave(&devc->lock,flags); - - outb((bits | 0x40), config_port); - - if (devc->dma2 != -1 && devc->dma2 != devc->dma1) - if ( (devc->dma1 == 0 && devc->dma2 == 1) || - (devc->dma1 == 1 && devc->dma2 == 0) || - (devc->dma1 == 3 && devc->dma2 == 0)) - dma2_bit = 0x04; - - outb((bits | dma_bits[devc->dma1] | dma2_bit), config_port); - spin_unlock_irqrestore(&devc->lock,flags); - } - - return 0; -} - -static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) -{ - ad1848_info *devc = dev->data; - if (devc) { - DEB(printk("ad1848: pm event received: 0x%x\n", rqst)); - - switch (rqst) { - case PM_SUSPEND: - ad1848_suspend(devc); - break; - case PM_RESUME: - ad1848_resume(devc); - break; - } - } - return 0; -} - - EXPORT_SYMBOL(ad1848_detect); EXPORT_SYMBOL(ad1848_init); EXPORT_SYMBOL(ad1848_unload); diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c index adc6896..46dd41d 100644 --- a/sound/oss/cs4281/cs4281m.c +++ b/sound/oss/cs4281/cs4281m.c @@ -298,7 +298,6 @@ struct cs4281_state { struct cs4281_pipeline pl[CS4281_NUMBER_OF_PIPELINES]; }; -#include <linux/pm_legacy.h> #include "cs4281pm-24.c" #if CSDEBUG @@ -4256,9 +4255,6 @@ static void __devinit cs4281_InitPM(struct cs4281_state *s) static int __devinit cs4281_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid) { -#ifndef NOT_CS4281_PM - struct pm_dev *pmdev; -#endif struct cs4281_state *s; dma_addr_t dma_mask; mm_segment_t fs; @@ -4374,19 +4370,7 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev, } #ifndef NOT_CS4281_PM cs4281_InitPM(s); - pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), cs4281_pm_callback); - if (pmdev) - { - CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs4281: probe() pm_register() succeeded (%p).\n", pmdev)); - pmdev->data = s; - } - else - { - CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO - "cs4281: probe() pm_register() failed (%p).\n", pmdev)); - s->pm.flags |= CS4281_PM_NOT_REGISTERED; - } + s->pm.flags |= CS4281_PM_NOT_REGISTERED; #endif pci_set_master(pcidev); // enable bus mastering @@ -4487,9 +4471,6 @@ static int __init cs4281_init_module(void) static void __exit cs4281_cleanup_module(void) { pci_unregister_driver(&cs4281_pci_driver); -#ifndef NOT_CS4281_PM - cs_pm_unregister_all(cs4281_pm_callback); -#endif CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO "cs4281: cleanup_cs4281() finished\n")); } diff --git a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c index d2a453a..90cbd76 100644 --- a/sound/oss/cs4281/cs4281pm-24.c +++ b/sound/oss/cs4281/cs4281pm-24.c @@ -27,9 +27,6 @@ #ifndef NOT_CS4281_PM #include <linux/pm.h> -#define cs_pm_register(a, b, c) pm_register((a), (b), (c)); -#define cs_pm_unregister_all(a) pm_unregister_all((a)); - static int cs4281_suspend(struct cs4281_state *s); static int cs4281_resume(struct cs4281_state *s); /* 
@@ -41,42 +38,6 @@ static int cs4281_resume(struct cs4281_state *s); #define CS4281_SUSPEND_TBL cs4281_suspend_null #define CS4281_RESUME_TBL cs4281_resume_null -static int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) -{ - struct cs4281_state *state; - - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n", - dev,(unsigned)rqst,data)); - state = (struct cs4281_state *) dev->data; - if (state) { - switch(rqst) { - case PM_SUSPEND: - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: PM suspend request\n")); - if(cs4281_suspend(state)) - { - CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO - "cs4281: PM suspend request refused\n")); - return 1; - } - break; - case PM_RESUME: - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: PM resume request\n")); - if(cs4281_resume(state)) - { - CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO - "cs4281: PM resume request refused\n")); - return 1; - } - break; - } - } - - return 0; -} - #else /* CS4281_PM */ #define CS4281_SUSPEND_TBL cs4281_suspend_null #define CS4281_RESUME_TBL cs4281_resume_null diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c index cb998e8..0da4d93 100644 --- a/sound/oss/cs46xx.c +++ b/sound/oss/cs46xx.c @@ -391,10 +391,6 @@ static void cs461x_clear_serial_FIFOs(struct cs_card *card, int type); static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state); static int cs46xx_resume_tbl(struct pci_dev *pcidev); -#ifndef CS46XX_ACPI_SUPPORT -static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data); -#endif - #if CSDEBUG /* DEBUG ROUTINES */ @@ -5320,7 +5316,6 @@ static const char fndmsg[] = KERN_INFO "cs46xx: Found %d audio device(s).\n"; static int __devinit cs46xx_probe(struct pci_dev *pci_dev, const struct pci_device_id *pciid) { - struct pm_dev *pmdev; int i,j; u16 ss_card, ss_vendor; struct cs_card *card; @@ -5530,22 +5525,6 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev, PCI_SET_DMA_MASK(pci_dev, dma_mask); list_add(&card->list, &cs46xx_devs); - pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pci_dev), cs46xx_pm_callback); - if (pmdev) - { - CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs46xx: probe() pm_register() succeeded (%p).\n", - pmdev)); - pmdev->data = card; - } - else - { - CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO - "cs46xx: probe() pm_register() failed (%p).\n", - pmdev)); - card->pm.flags |= CS46XX_PM_NOT_REGISTERED; - } - CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n", (unsigned)card->pm.flags,card)); @@ -5727,7 +5706,6 @@ static int __init cs46xx_init_module(void) static void __exit cs46xx_cleanup_module(void) { pci_unregister_driver(&cs46xx_pci_driver); - cs_pm_unregister_all(cs46xx_pm_callback); CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: cleanup_cs46xx() finished\n")); } @@ -5735,44 +5713,6 @@ static void __exit cs46xx_cleanup_module(void) module_init(cs46xx_init_module); module_exit(cs46xx_cleanup_module); -#ifndef CS46XX_ACPI_SUPPORT -static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) -{ - struct cs_card *card; - - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n", - dev,(unsigned)rqst,data)); - card = (struct cs_card *) dev->data; - if (card) { - switch(rqst) { - case PM_SUSPEND: - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs46xx: PM suspend request\n")); - if(cs46xx_suspend(card, PMSG_SUSPEND)) - { - CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO - "cs46xx: PM suspend request refused\n")); 
- return 1; - } - break; - case PM_RESUME: - CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs46xx: PM resume request\n")); - if(cs46xx_resume(card)) - { - CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO - "cs46xx: PM resume request refused\n")); - return 1; - } - break; - } - } - - return 0; -} -#endif - #if CS46XX_ACPI_SUPPORT static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state) { diff --git a/sound/oss/cs46xxpm-24.h b/sound/oss/cs46xxpm-24.h index e220bd7..ad82db8 100644 --- a/sound/oss/cs46xxpm-24.h +++ b/sound/oss/cs46xxpm-24.h @@ -38,13 +38,9 @@ */ static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state); static int cs46xx_resume_tbl(struct pci_dev *pcidev); -#define cs_pm_register(a, b, c) NULL -#define cs_pm_unregister_all(a) #define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl #define CS46XX_RESUME_TBL cs46xx_resume_tbl #else -#define cs_pm_register(a, b, c) pm_register((a), (b), (c)); -#define cs_pm_unregister_all(a) pm_unregister_all((a)); #define CS46XX_SUSPEND_TBL cs46xx_null #define CS46XX_RESUME_TBL cs46xx_null #endif diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c index 3abd354..f9ac5b1 100644 --- a/sound/oss/maestro.c +++ b/sound/oss/maestro.c @@ -230,10 +230,6 @@ #include <asm/page.h> #include <asm/uaccess.h> -#include <linux/pm.h> -#include <linux/pm_legacy.h> -static int maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *d); - #include "maestro.h" static struct pci_driver maestro_pci_driver; @@ -3404,7 +3400,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid) int i, ret; struct ess_card *card; struct ess_state *ess; - struct pm_dev *pmdev; int num = 0; /* when built into the kernel, we only print version if device is found */ @@ -3450,11 +3445,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid) memset(card, 0, sizeof(*card)); card->pcidev = pcidev; - pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), - maestro_pm_callback); - if (pmdev) - pmdev->data = card; - card->iobase = iobase; card->card_type = card_type; card->irq = pcidev->irq; @@ -3670,7 +3660,6 @@ static int maestro_notifier(struct notifier_block *nb, unsigned long event, void static void cleanup_maestro(void) { M_printk("maestro: unloading\n"); pci_unregister_driver(&maestro_pci_driver); - pm_unregister_all(maestro_pm_callback); unregister_reboot_notifier(&maestro_nb); } @@ -3691,143 +3680,5 @@ check_suspend(struct ess_card *card) current->state = TASK_RUNNING; } -static int -maestro_suspend(struct ess_card *card) -{ - unsigned long flags; - int i,j; - - spin_lock_irqsave(&card->lock,flags); /* over-kill */ - - M_printk("maestro: apm in dev %p\n",card); - - /* we have to read from the apu regs, need - to power it up */ - maestro_power(card,ACPI_D0); - - for(i=0;i<NR_DSPS;i++) { - struct ess_state *s = &card->channels[i]; - - if(s->dev_audio == -1) - continue; - - M_printk("maestro: stopping apus for device %d\n",i); - stop_dac(s); - stop_adc(s); - for(j=0;j<6;j++) - card->apu_map[s->apu[j]][5]=apu_get_register(s,j,5); - - } - - /* get rid of interrupts? */ - if( card->dsps_open > 0) - stop_bob(&card->channels[0]); - - card->in_suspend++; - - spin_unlock_irqrestore(&card->lock,flags); - - /* we trust in the bios to power down the chip on suspend. - * XXX I'm also not sure that in_suspend will protect - * against all reg accesses from here on out. 
- */ - return 0; -} -static int -maestro_resume(struct ess_card *card) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&card->lock,flags); /* over-kill */ - - card->in_suspend = 0; - - M_printk("maestro: resuming card at %p\n",card); - - /* restore all our config */ - maestro_config(card); - /* need to restore the base pointers.. */ - if(card->dmapages) - set_base_registers(&card->channels[0],card->dmapages); - - mixer_push_state(card); - - /* set each channels' apu control registers before - * restoring audio - */ - for(i=0;i<NR_DSPS;i++) { - struct ess_state *s = &card->channels[i]; - int chan,reg; - - if(s->dev_audio == -1) - continue; - - for(chan = 0 ; chan < 6 ; chan++) { - wave_set_register(s,s->apu[chan]<<3,s->apu_base[chan]); - for(reg = 1 ; reg < NR_APU_REGS ; reg++) - apu_set_register(s,chan,reg,s->card->apu_map[s->apu[chan]][reg]); - } - for(chan = 0 ; chan < 6 ; chan++) - apu_set_register(s,chan,0,s->card->apu_map[s->apu[chan]][0] & 0xFF0F); - } - - /* now we flip on the music */ - - if( card->dsps_open <= 0) { - /* this card's idle */ - maestro_power(card,ACPI_D2); - } else { - /* ok, we're actually playing things on - this card */ - maestro_power(card,ACPI_D0); - start_bob(&card->channels[0]); - for(i=0;i<NR_DSPS;i++) { - struct ess_state *s = &card->channels[i]; - - /* these use the apu_mode, and can handle - spurious calls */ - start_dac(s); - start_adc(s); - } - } - - spin_unlock_irqrestore(&card->lock,flags); - - /* all right, we think things are ready, - wake up people who were using the device - when we suspended */ - wake_up(&(card->suspend_queue)); - - return 0; -} - -int -maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) -{ - struct ess_card *card = (struct ess_card*) dev->data; - - if ( ! card ) goto out; - - M_printk("maestro: pm event 0x%x received for card %p\n", rqst, card); - - switch (rqst) { - case PM_SUSPEND: - maestro_suspend(card); - break; - case PM_RESUME: - maestro_resume(card); - break; - /* - * we'd also like to find out about - * power level changes because some biosen - * do mean things to the maestro when they - * change their power state. - */ - } -out: - return 0; -} - module_init(init_maestro); module_exit(cleanup_maestro); diff --git a/sound/oss/nm256_audio.c b/sound/oss/nm256_audio.c index 0ce2c40..42d8f05 100644 --- a/sound/oss/nm256_audio.c +++ b/sound/oss/nm256_audio.c @@ -24,8 +24,6 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/pm.h> -#include <linux/pm_legacy.h> #include <linux/delay.h> #include <linux/spinlock.h> #include "sound_config.h" @@ -49,7 +47,6 @@ static int nm256_grabInterrupt (struct nm256_info *card); static int nm256_releaseInterrupt (struct nm256_info *card); static irqreturn_t nm256_interrupt (int irq, void *dev_id, struct pt_regs *dummy); static irqreturn_t nm256_interrupt_zx (int irq, void *dev_id, struct pt_regs *dummy); -static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data); /* These belong in linux/pci.h. */ #define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 @@ -992,15 +989,6 @@ nm256_install_mixer (struct nm256_info *card) return 0; } -/* Perform a full reset on the hardware; this is invoked when an APM - resume event occurs. 
*/ -static void -nm256_full_reset (struct nm256_info *card) -{ - nm256_initHw (card); - ac97_reset (&(card->mdev)); -} - /* * See if the signature left by the NM256 BIOS is intact; if so, we use * the associated address as the end of our audio buffer in the video @@ -1053,7 +1041,6 @@ static int __devinit nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr) { struct nm256_info *card; - struct pm_dev *pmdev; int x; if (pci_enable_device(pcidev)) @@ -1234,43 +1221,10 @@ nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr) nm256_install_mixer (card); - pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), handle_pm_event); - if (pmdev) - pmdev->data = card; - return 1; } -/* - * PM event handler, so the card is properly reinitialized after a power - * event. - */ -static int -handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data) -{ - struct nm256_info *crd = (struct nm256_info*) dev->data; - if (crd) { - switch (rqst) { - case PM_SUSPEND: - break; - case PM_RESUME: - { - int playing = crd->playing; - nm256_full_reset (crd); - /* - * A little ugly, but that's ok; pretend the - * block we were playing is done. - */ - if (playing) - DMAbuf_outputintr (crd->dev_for_play, 1); - } - break; - } - } - return 0; -} - static int __devinit nm256_probe(struct pci_dev *pcidev,const struct pci_device_id *pciid) { @@ -1696,7 +1650,6 @@ static int __init do_init_nm256(void) static void __exit cleanup_nm256 (void) { pci_unregister_driver(&nm256_pci_driver); - pm_unregister_all (&handle_pm_event); } module_init(do_init_nm256); diff --git a/sound/oss/opl3sa2.c b/sound/oss/opl3sa2.c index cd41d0e..5cecdbc 100644 --- a/sound/oss/opl3sa2.c +++ b/sound/oss/opl3sa2.c @@ -69,8 +69,6 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> -#include <linux/pm.h> -#include <linux/pm_legacy.h> #include "sound_config.h" #include "ad1848.h" @@ -139,10 +137,6 @@ typedef struct { struct pnp_dev* pdev; int activated; /* Whether said devices have been activated */ #endif -#ifdef CONFIG_PM_LEGACY - unsigned int in_suspend; - struct pm_dev *pmdev; -#endif unsigned int card; int chipset; /* What's my version(s)? */ char *chipset_name; @@ -341,22 +335,6 @@ static void opl3sa2_mixer_reset(opl3sa2_state_t* devc) } } -/* Currently only used for power management */ -#ifdef CONFIG_PM_LEGACY -static void opl3sa2_mixer_restore(opl3sa2_state_t* devc) -{ - if (devc) { - opl3sa2_set_volume(devc, devc->volume_l, devc->volume_r); - opl3sa2_set_mic(devc, devc->mic); - - if (devc->chipset == CHIPSET_OPL3SA3) { - opl3sa3_set_bass(devc, devc->bass_l, devc->bass_r); - opl3sa3_set_treble(devc, devc->treble_l, devc->treble_r); - } - } -} -#endif /* CONFIG_PM_LEGACY */ - static inline void arg_to_vol_mono(unsigned int vol, int* value) { int left; @@ -832,84 +810,6 @@ static struct pnp_driver opl3sa2_driver = { /* End of component functions */ -#ifdef CONFIG_PM_LEGACY - -static DEFINE_SPINLOCK(opl3sa2_lock); - -/* Power Management support functions */ -static int opl3sa2_suspend(struct pm_dev *pdev, unsigned int pm_mode) -{ - unsigned long flags; - opl3sa2_state_t *p; - - if (!pdev) - return -EINVAL; - - spin_lock_irqsave(&opl3sa2_lock,flags); - - p = (opl3sa2_state_t *) pdev->data; - switch (pm_mode) { - case 1: - pm_mode = OPL3SA2_PM_MODE1; - break; - case 2: - pm_mode = OPL3SA2_PM_MODE2; - break; - case 3: - pm_mode = OPL3SA2_PM_MODE3; - break; - default: - /* we don't know howto handle this... 
*/ - spin_unlock_irqrestore(&opl3sa2_lock, flags); - return -EBUSY; - } - - p->in_suspend = 1; - - /* it's supposed to automute before suspending, so we won't bother */ - opl3sa2_write(p->cfg_port, OPL3SA2_PM, pm_mode); - /* wait a while for the clock oscillator to stabilise */ - mdelay(10); - - spin_unlock_irqrestore(&opl3sa2_lock,flags); - return 0; -} - -static int opl3sa2_resume(struct pm_dev *pdev) -{ - unsigned long flags; - opl3sa2_state_t *p; - - if (!pdev) - return -EINVAL; - - p = (opl3sa2_state_t *) pdev->data; - spin_lock_irqsave(&opl3sa2_lock,flags); - - /* I don't think this is necessary */ - opl3sa2_write(p->cfg_port, OPL3SA2_PM, OPL3SA2_PM_MODE0); - opl3sa2_mixer_restore(p); - p->in_suspend = 0; - - spin_unlock_irqrestore(&opl3sa2_lock,flags); - return 0; -} - -static int opl3sa2_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data) -{ - unsigned long mode = (unsigned long)data; - - switch (rqst) { - case PM_SUSPEND: - return opl3sa2_suspend(pdev, mode); - - case PM_RESUME: - return opl3sa2_resume(pdev); - } - return 0; -} -#endif /* CONFIG_PM_LEGACY */ - /* * Install OPL3-SA2 based card(s). * @@ -1021,12 +921,6 @@ static int __init init_opl3sa2(void) /* ewww =) */ opl3sa2_state[card].card = card; -#ifdef CONFIG_PM_LEGACY - /* register our power management capabilities */ - opl3sa2_state[card].pmdev = pm_register(PM_ISA_DEV, card, opl3sa2_pm_callback); - if (opl3sa2_state[card].pmdev) - opl3sa2_state[card].pmdev->data = &opl3sa2_state[card]; -#endif /* CONFIG_PM_LEGACY */ /* * Set the Yamaha 3D enhancement mode (aka Ymersion) if asked to and * @@ -1083,10 +977,6 @@ static void __exit cleanup_opl3sa2(void) int card; for(card = 0; card < opl3sa2_cards_num; card++) { -#ifdef CONFIG_PM_LEGACY - if (opl3sa2_state[card].pmdev) - pm_unregister(opl3sa2_state[card].pmdev); -#endif if (opl3sa2_state[card].cfg_mpu.slots[1] != -1) { unload_opl3sa2_mpu(&opl3sa2_state[card].cfg_mpu); }
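
A closing note on the mm/truncate.c change earlier in this series, which the shmem_truncate_range(), ->truncate_range and truncate_inode_pages() hunks all build on: lstart/lend are byte offsets, lend is inclusive and must address the last byte of a page, and (loff_t)-1 means "no upper bound". The following is a minimal userspace sketch of just that index arithmetic, not kernel code; the typedefs, the 4K page size and the demo harness are assumptions for illustration, and only the shift/mask expressions and the alignment check mirrored from the new BUG_ON() come from the patch itself.

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the kernel's loff_t and pgoff_t; 4K pages assumed. */
typedef long long lloff_t;
typedef unsigned long lpgoff_t;

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

static void show_range(lloff_t lstart, lloff_t lend)
{
	/* lend must address the final byte of a page ((loff_t)-1 qualifies),
	 * mirroring the BUG_ON() added to truncate_inode_pages_range(). */
	assert((lend & (lloff_t)(PAGE_CACHE_SIZE - 1)) ==
	       (lloff_t)(PAGE_CACHE_SIZE - 1));

	/* First page wholly inside the range, last page of the range, and
	 * the number of bytes preserved in a partial first page. */
	lpgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	lpgoff_t end = lend >> PAGE_CACHE_SHIFT;  /* -1 wraps to the max index */
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);

	printf("lstart=%lld lend=%lld -> whole pages [%lu..%lu], "
	       "%u bytes kept in page %lld\n",
	       lstart, lend, start, end, partial,
	       (lloff_t)(lstart >> PAGE_CACHE_SHIFT));
}

int main(void)
{
	show_range(0, -1);        /* truncate_inode_pages(mapping, 0): drop all   */
	show_range(6000, -1);     /* ordinary truncate: zero page 1 from byte 1904 */
	show_range(4096, 16383);  /* hole punch via ->truncate_range: pages 1..3  */
	return 0;
}

Note how the old entry points survive unchanged for their callers: truncate_inode_pages() and shmem_truncate() simply forward to the range versions with lend == (loff_t)-1, so the "truncate everything past lstart" behaviour falls out of the huge inclusive end index.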