From 94cabd003e989556d8bf84027d96284dc2d99c76 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Sat, 12 Nov 2005 18:53:48 +0000 Subject: [ARM] 3149/1: SharpSL: Add Akita (SL-C1000) machine support Patch from Richard Purdie Add the core machine support for the Sharp SL-C1000 (Akita) and enable the Kconfig selection for it. Signed-off-by: Richard Purdie Signed-off-by: Russell King --- include/asm-arm/arch-pxa/akita.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-arm/arch-pxa/akita.h b/include/asm-arm/arch-pxa/akita.h index 4a1fbcf..5d8cc1d 100644 --- a/include/asm-arm/arch-pxa/akita.h +++ b/include/asm-arm/arch-pxa/akita.h @@ -25,6 +25,8 @@ /* Default Values */ #define AKITA_IOEXP_IO_OUT (AKITA_IOEXP_IR_ON | AKITA_IOEXP_AKIN_PULLUP) +extern struct platform_device akitaioexp_device; + void akita_set_ioexp(struct device *dev, unsigned char bitmask); void akita_reset_ioexp(struct device *dev, unsigned char bitmask); -- cgit v1.1 From bd5d080ab99642e3245ef7cfa54490384c01d878 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Sun, 13 Nov 2005 10:07:48 +0000 Subject: [ARM] 3160/1: SharpSL: Add driver for Akita specific GPIOs Patch from Richard Purdie Add a driver for the extra GPIOs found on the Sharp SL-C1000 (Akita). These GPIOs are found on a Maxim MAX7310 I2C i/o expander chip. A generic GPIO driver for the MAX7310 was attempted but this mini driver is a much simpler and much more effective solution avoiding several issues and complexity the generic driver had (as discussed on LKML). The platform device is required so the device parent can be set correctly, which ensures the device is one of the last to suspend and first to resume. Whilst the i2c suspend/resume calls can be influenced, nothing guarantees this is earlier/later than the subsystems the gpios are used on, which are all independent of i2c (sound, irda, video/backlight etc.). Signed-off-by: Richard Purdie Signed-off-by: Russell King --- include/linux/i2c-id.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 74abaec..1543daa 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -107,6 +107,7 @@ #define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ #define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */ #define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ +#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */ #define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */ #define I2C_DRIVERID_EXP1 0xF1 -- cgit v1.1 From 47936357c0d14809c3c9547e532511f6625654b2 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sun, 13 Nov 2005 16:06:21 -0800 Subject: [PATCH] x86_64: fix tss limit Fix the x86_64 TSS limit in TSS descriptor.
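The arithmetic behind the fix, spelled out as a standalone sketch (illustrative only, not part of the patch; it reuses the same IO_BITMAP_* constants the hunk below uses, and a segment limit is inclusive, so it must name the last valid byte):

	/* illustrative only -- mirrors the expression used in set_tss_desc() below */
	unsigned int tss_seg_limit(void)
	{
		unsigned int bytes = IO_BITMAP_OFFSET	/* hardware TSS up to the bitmap */
			+ IO_BITMAP_BYTES		/* the I/O permission bitmap itself */
			+ sizeof(unsigned long);	/* extra terminating long after the bitmap */

		return bytes - 1;			/* limit points at the last valid byte */
	}
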
Signed-off-by: Suresh Siddha Acked-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-x86_64/desc.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h index 68ac3c6..b837820 100644 --- a/include/asm-x86_64/desc.h +++ b/include/asm-x86_64/desc.h @@ -129,9 +129,16 @@ static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned static inline void set_tss_desc(unsigned cpu, void *addr) { - set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], (unsigned long)addr, - DESC_TSS, - sizeof(struct tss_struct) - 1); + /* + * sizeof(unsigned long) coming from an extra "long" at the end + * of the iobitmap. See tss_struct definition in processor.h + * + * -1? seg base+limit should be pointing to the address of the + * last valid byte + */ + set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], + (unsigned long)addr, DESC_TSS, + IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); } static inline void set_ldt_desc(unsigned cpu, void *addr, int size) -- cgit v1.1 From 95e861db3eaba7bc99f8605db70103ec3d078203 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 13 Nov 2005 16:06:24 -0800 Subject: [PATCH] reorder struct files_struct The file_lock spinlock sits close to mostly read fields of 'struct files_struct' In SMP (and NUMA) environments, each time a thread wants to open or close a file, it has to acquire the spinlock, thus invalidating the cache line containing this spinlock on other CPUS. So other threads doing read()/write()/... calls that use RCU to access the file table are going to ask further memory (possibly NUMA) transactions to read again this memory line. Move the spinlock to another cache line, so that concurrent threads can share the cache line containing 'count' and 'fdt' fields. It's worth up to 9% on a microbenchmark using a 4-thread 2-package x86 machine. See http://marc.theaimsgroup.com/?l=linux-kernel&m=112680448713342&w=2 Signed-off-by: Eric Dumazet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/file.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/file.h b/include/linux/file.h index d3b1a15..418b610 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -33,13 +33,13 @@ struct fdtable { * Open file table structure */ struct files_struct { - atomic_t count; - spinlock_t file_lock; /* Protects all the below members. Nests inside tsk->alloc_lock */ + atomic_t count; struct fdtable *fdt; struct fdtable fdtab; - fd_set close_on_exec_init; - fd_set open_fds_init; - struct file * fd_array[NR_OPEN_DEFAULT]; + fd_set close_on_exec_init; + fd_set open_fds_init; + struct file * fd_array[NR_OPEN_DEFAULT]; + spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */ }; #define files_fdtable(files) (rcu_dereference((files)->fdt)) -- cgit v1.1 From bca73e4bf8563d83f7856164caa44d5f42e44cca Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Sun, 13 Nov 2005 16:06:25 -0800 Subject: [PATCH] move pm_register/etc. to CONFIG_PM_LEGACY, pm_legacy.h Since few people need the support anymore, this moves the legacy pm_xxx functions to CONFIG_PM_LEGACY, and include/linux/pm_legacy.h. 
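A minimal sketch of what a remaining user of the old interface looks like after this move (the foo_* names are made up for illustration; only the new header and the CONFIG_PM_LEGACY dependency come from the patch):

	#include <linux/pm_legacy.h>	/* was pulled in via <linux/pm.h> before */

	static int foo_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
	{
		switch (rqst) {
		case PM_SUSPEND:	/* quiesce the hardware */
			break;
		case PM_RESUME:		/* bring it back up */
			break;
		}
		return 0;
	}

	static struct pm_dev *foo_pm;

	static int __init foo_init(void)
	{
		/* the stub returns NULL when CONFIG_PM_LEGACY is not set */
		foo_pm = pm_register(PM_SYS_DEV, 0, foo_pm_callback);
		return 0;
	}

Such drivers presumably also need their Kconfig entries to depend on PM_LEGACY so the registration actually takes effect.
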
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pm.h | 49 ----------------------------------------- include/linux/pm_legacy.h | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 49 deletions(-) create mode 100644 include/linux/pm_legacy.h (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index 1514098..5be87ba 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -94,55 +94,6 @@ struct pm_dev struct list_head entry; }; -#ifdef CONFIG_PM - -extern int pm_active; - -#define PM_IS_ACTIVE() (pm_active != 0) - -/* - * Register a device with power management - */ -struct pm_dev __deprecated * -pm_register(pm_dev_t type, unsigned long id, pm_callback callback); - -/* - * Unregister a device with power management - */ -void __deprecated pm_unregister(struct pm_dev *dev); - -/* - * Unregister all devices with matching callback - */ -void __deprecated pm_unregister_all(pm_callback callback); - -/* - * Send a request to all devices - */ -int __deprecated pm_send_all(pm_request_t rqst, void *data); - -#else /* CONFIG_PM */ - -#define PM_IS_ACTIVE() 0 - -static inline struct pm_dev *pm_register(pm_dev_t type, - unsigned long id, - pm_callback callback) -{ - return NULL; -} - -static inline void pm_unregister(struct pm_dev *dev) {} - -static inline void pm_unregister_all(pm_callback callback) {} - -static inline int pm_send_all(pm_request_t rqst, void *data) -{ - return 0; -} - -#endif /* CONFIG_PM */ - /* Functions above this comment are list-based old-style power * managment. Please avoid using them. */ diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h new file mode 100644 index 0000000..1252b45 --- /dev/null +++ b/include/linux/pm_legacy.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_PM_LEGACY_H__ +#define __LINUX_PM_LEGACY_H__ + +#include + +#ifdef CONFIG_PM_LEGACY + +extern int pm_active; + +#define PM_IS_ACTIVE() (pm_active != 0) + +/* + * Register a device with power management + */ +struct pm_dev __deprecated * +pm_register(pm_dev_t type, unsigned long id, pm_callback callback); + +/* + * Unregister a device with power management + */ +void __deprecated pm_unregister(struct pm_dev *dev); + +/* + * Unregister all devices with matching callback + */ +void __deprecated pm_unregister_all(pm_callback callback); + +/* + * Send a request to all devices + */ +int __deprecated pm_send_all(pm_request_t rqst, void *data); + +#else /* CONFIG_PM_LEGACY */ + +#define PM_IS_ACTIVE() 0 + +static inline struct pm_dev *pm_register(pm_dev_t type, + unsigned long id, + pm_callback callback) +{ + return NULL; +} + +static inline void pm_unregister(struct pm_dev *dev) {} + +static inline void pm_unregister_all(pm_callback callback) {} + +static inline int pm_send_all(pm_request_t rqst, void *data) +{ + return 0; +} + +#endif /* CONFIG_PM_LEGACY */ + +#endif /* __LINUX_PM_LEGACY_H__ */ + -- cgit v1.1 From c1986ee9bea3d880bcf0d3f1a31e055778f306c7 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Sun, 13 Nov 2005 16:06:29 -0800 Subject: [PATCH] New Omnikey Cardman 4000 driver Add new Omnikey Cardman 4000 smartcard reader driver Signed-off-by: Harald Welte Cc: Dominik Brodowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cm4000_cs.h | 66 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 include/linux/cm4000_cs.h (limited to 'include') diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h new file mode 100644 index 
0000000..605ebe2 --- /dev/null +++ b/include/linux/cm4000_cs.h @@ -0,0 +1,66 @@ +#ifndef _CM4000_H_ +#define _CM4000_H_ + +#define MAX_ATR 33 + +#define CM4000_MAX_DEV 4 + +/* those two structures are passed via ioctl() from/to userspace. They are + * used by existing userspace programs, so I kepth the awkward "bIFSD" naming + * not to break compilation of userspace apps. -HW */ + +typedef struct atreq { + int32_t atr_len; + unsigned char atr[64]; + int32_t power_act; + unsigned char bIFSD; + unsigned char bIFSC; +} atreq_t; + + +/* what is particularly stupid in the original driver is the arch-dependant + * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace + * will lay out the structure members differently than the 64bit kernel. + * + * I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t". + * On 32bit this will make no difference. With 64bit kernels, it will make + * 32bit apps work, too. + */ + +typedef struct ptsreq { + u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/ + unsigned char flags; + unsigned char pts1; + unsigned char pts2; + unsigned char pts3; +} ptsreq_t; + +#define CM_IOC_MAGIC 'c' +#define CM_IOC_MAXNR 255 + +#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *) +#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *) +#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *) +#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3) +#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4) + +#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*) + +/* card and device states */ +#define CM_CARD_INSERTED 0x01 +#define CM_CARD_POWERED 0x02 +#define CM_ATR_PRESENT 0x04 +#define CM_ATR_VALID 0x08 +#define CM_STATE_VALID 0x0f +/* extra info only from CM4000 */ +#define CM_NO_READER 0x10 +#define CM_BAD_CARD 0x20 + + +#ifdef __KERNEL__ + +#define DEVICE_NAME "cmm" +#define MODULE_NAME "cm4000_cs" + +#endif /* __KERNEL__ */ +#endif /* _CM4000_H_ */ -- cgit v1.1 From 4c8d3d997ef3c0594350fba716529905b314287e Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Sun, 13 Nov 2005 16:06:30 -0800 Subject: [PATCH] Update email address for Kumar Changed jobs and the Freescale address is no longer valid. Signed-off-by: Kumar Gala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ppc/immap_85xx.h | 2 +- include/asm-ppc/ipic.h | 2 +- include/asm-ppc/mpc83xx.h | 2 +- include/asm-ppc/mpc85xx.h | 2 +- include/asm-ppc/ppc_sys.h | 2 +- include/linux/fsl_devices.h | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-ppc/immap_85xx.h b/include/asm-ppc/immap_85xx.h index 50fb5e4..9383d0c 100644 --- a/include/asm-ppc/immap_85xx.h +++ b/include/asm-ppc/immap_85xx.h @@ -3,7 +3,7 @@ * * MPC85xx Internal Memory Map * - * Maintainer: Kumar Gala + * Maintainer: Kumar Gala * * Copyright 2004 Freescale Semiconductor, Inc * diff --git a/include/asm-ppc/ipic.h b/include/asm-ppc/ipic.h index 9092b92..0fe396a 100644 --- a/include/asm-ppc/ipic.h +++ b/include/asm-ppc/ipic.h @@ -3,7 +3,7 @@ * * IPIC external definitions and structure. 
* - * Maintainer: Kumar Gala + * Maintainer: Kumar Gala * * Copyright 2005 Freescale Semiconductor, Inc * diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h index ce21220..7cdf60f 100644 --- a/include/asm-ppc/mpc83xx.h +++ b/include/asm-ppc/mpc83xx.h @@ -3,7 +3,7 @@ * * MPC83xx definitions * - * Maintainer: Kumar Gala + * Maintainer: Kumar Gala * * Copyright 2005 Freescale Semiconductor, Inc * diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h index d98db98..9d14bae 100644 --- a/include/asm-ppc/mpc85xx.h +++ b/include/asm-ppc/mpc85xx.h @@ -3,7 +3,7 @@ * * MPC85xx definitions * - * Maintainer: Kumar Gala + * Maintainer: Kumar Gala * * Copyright 2004 Freescale Semiconductor, Inc * diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h index bba5305..83d8c77 100644 --- a/include/asm-ppc/ppc_sys.h +++ b/include/asm-ppc/ppc_sys.h @@ -3,7 +3,7 @@ * * PPC system definitions and library functions * - * Maintainer: Kumar Gala + * Maintainer: Kumar Gala * * Copyright 2005 Freescale Semiconductor, Inc * diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 114d5d5..934aa9b 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -4,7 +4,7 @@ * Definitions for any platform device related flags or structures for * Freescale processor devices * - * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * Maintainer: Kumar Gala * * Copyright 2004 Freescale Semiconductor, Inc * -- cgit v1.1 From 51c6f666fceb3184eeff045dad4432b602cd648e Mon Sep 17 00:00:00 2001 From: Robin Holt Date: Sun, 13 Nov 2005 16:06:42 -0800 Subject: [PATCH] mm: ZAP_BLOCK causes redundant work The address based work estimate for unmapping (for lockbreak) is and always was horribly inefficient for sparse mappings. The problem is most simply explained with an example: If we find a pgd is clear, we still have to call into unmap_page_range PGDIR_SIZE / ZAP_BLOCK_SIZE times, each time checking the clear pgd, in order to progress the working address to the next pgd. The fundamental way to solve the problem is to keep track of the end address we've processed and pass it back to the higher layers. From: Nick Piggin Modification to completely get away from address based work estimate and instead use an abstract count, with a very small cost for empty entries as opposed to present pages. 
On 2.6.14-git2, ppc64, and CONFIG_PREEMPT=y, mapping and unmapping 1TB of virtual address space takes 1.69s; with the following patch applied, this operation can be done 1000 times in less than 0.01s. From: Andrew Morton With CONFIG_HUGETLB_PAGE=n: mm/memory.c: In function `unmap_vmas': mm/memory.c:779: warning: division by zero Due to zap_work -= (end - start) / (HPAGE_SIZE / PAGE_SIZE); So make the dummy HPAGE_SIZE non-zero. Signed-off-by: Robin Holt Signed-off-by: Nick Piggin Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0cea162..1056717 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -102,8 +102,8 @@ static inline unsigned long hugetlb_total_pages(void) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) #ifndef HPAGE_MASK -#define HPAGE_MASK 0 /* Keep the compiler happy */ -#define HPAGE_SIZE 0 +#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */ +#define HPAGE_SIZE PAGE_SIZE #endif #endif /* !CONFIG_HUGETLB_PAGE */ -- cgit v1.1 From 7fb1d9fca5c6e3b06773b69165a73f3fb786b8ee Mon Sep 17 00:00:00 2001 From: Rohit Seth Date: Sun, 13 Nov 2005 16:06:43 -0800 Subject: [PATCH] mm: __alloc_pages cleanup Clean up of __alloc_pages. Restoration of previous behaviour, plus further cleanups by introducing an 'alloc_flags', removing the last of should_reclaim_zone. Signed-off-by: Rohit Seth Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f5fa308..6cfb114 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -329,7 +329,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive, void build_all_zonelists(void); void wakeup_kswapd(struct zone *zone, int order); int zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int alloc_type, int can_try_harder, gfp_t gfp_high); + int classzone_idx, int alloc_flags); #ifdef CONFIG_HAVE_MEMORY_PRESENT void memory_present(int nid, unsigned long start, unsigned long end); -- cgit v1.1 From 2d6c666e8704cf06267f29a4fa3d2cf823469c38 Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Sun, 13 Nov 2005 16:06:44 -0800 Subject: [PATCH] mm: gfp_noreclaim cleanup Remove last remnant of the defunct early reclaim page logic, the no longer used __GFP_NORECLAIM flag bit.
Signed-off-by: Paul Jackson Acked-by: Martin Hicks Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 5 ++--- include/linux/pagemap.h | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c377943..23279d8 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -39,8 +39,7 @@ struct vm_area_struct; #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ -#define __GFP_NORECLAIM ((__force gfp_t)0x20000u) /* No realy zone reclaim during allocation */ -#define __GFP_HARDWALL ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */ +#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ #define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) @@ -49,7 +48,7 @@ struct vm_area_struct; #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ - __GFP_NOMEMALLOC|__GFP_NORECLAIM|__GFP_HARDWALL) + __GFP_NOMEMALLOC|__GFP_HARDWALL) #define GFP_ATOMIC (__GFP_HIGH) #define GFP_NOIO (__GFP_WAIT) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ba6c310..ee700c6 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -53,12 +53,12 @@ void release_pages(struct page **pages, int nr, int cold); static inline struct page *page_cache_alloc(struct address_space *x) { - return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0); + return alloc_pages(mapping_gfp_mask(x), 0); } static inline struct page *page_cache_alloc_cold(struct address_space *x) { - return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0); + return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0); } typedef int filler_t(void *, struct page *); -- cgit v1.1 From 7fce260a6bf75080ef61408504add5618f90e41b Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Sun, 13 Nov 2005 16:06:48 -0800 Subject: [PATCH] ppc: add support for new powerbooks Enablement patch for the new PowerBooks (late 2005 edition). This enables the ATA controller, Gigabit ethernet and basic AGP setup. Bluetooth works out-of-the box after running hid2hci. Still remaining is to get the touchpad to work, the simple change of just adding the new USB ids isn't enough. 
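As a usage sketch (an illustrative ID table, not from this patch), a PCI driver picks the new devices up through an entry built from the constants added below; for the gigabit ethernet the real consumer is presumably the sungem driver:

	static struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, foo_ids);
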
Signed-off-by: Olof Johansson Acked-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pci_ids.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d00f8ba..d4c1c8f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -805,6 +805,10 @@ #define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 #define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 #define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 +#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 +#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 +#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a +#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b #define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 #define PCI_VENDOR_ID_YAMAHA 0x1073 -- cgit v1.1 From a1261f54611ec4ad6a7ab7080f86747e3ac3685b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 13 Nov 2005 16:06:55 -0800 Subject: [PATCH] m68k: introduce task_thread_info new helper - task_thread_info(task). On platforms that have thread_info allocated separately (i.e. in default case) it simply returns task->thread_info. m68k wants (and for good reasons) to embed its thread_info into task_struct. So it will (in later patch) have task_thread_info() of its own. For now we just add a macro for generic case and convert existing instances of its body in core kernel to uses of new macro. Obviously safe - all normal architectures get the same preprocessor output they used to get. Signed-off-by: Al Viro Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 2bbf968..f865031 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1233,32 +1233,34 @@ static inline void task_unlock(struct task_struct *p) spin_unlock(&p->alloc_lock); } +#define task_thread_info(task) (task)->thread_info + /* set thread flags in other task's structures * - see asm/thread_info.h for TIF_xxxx flags available */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { - set_ti_thread_flag(tsk->thread_info,flag); + set_ti_thread_flag(task_thread_info(tsk), flag); } static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) { - clear_ti_thread_flag(tsk->thread_info,flag); + clear_ti_thread_flag(task_thread_info(tsk), flag); } static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) { - return test_and_set_ti_thread_flag(tsk->thread_info,flag); + return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); } static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) { - return test_and_clear_ti_thread_flag(tsk->thread_info,flag); + return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); } static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) { - return test_ti_thread_flag(tsk->thread_info,flag); + return test_ti_thread_flag(task_thread_info(tsk), flag); } static inline void set_tsk_need_resched(struct task_struct *tsk) @@ -1329,12 +1331,12 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped); static inline unsigned int task_cpu(const struct task_struct *p) { - return p->thread_info->cpu; + return task_thread_info(p)->cpu; } static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) { - p->thread_info->cpu = cpu; + task_thread_info(p)->cpu = cpu; } #else -- cgit v1.1 
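The value of the accessor shows up in a one-line sketch (the helper name is illustrative, not from the patch): generic code stops dereferencing tsk->thread_info directly, so an architecture that embeds thread_info in task_struct only has to redefine task_thread_info() instead of touching every caller.

	static inline int needs_resched_example(struct task_struct *tsk)
	{
		/* behaves exactly like the old tsk->thread_info->flags access */
		return test_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED);
	}
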
From 10ebffde3d3916026974352b7900e44afe2b243f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 13 Nov 2005 16:06:56 -0800 Subject: [PATCH] m68k: introduce setup_thread_stack() and end_of_stack() encapsulates the rest of arch-dependent operations with thread_info access. Two new helpers - setup_thread_stack() and end_of_stack(). For normal case the former consists of copying thread_info of parent to new thread_info and the latter returns pointer immediately past the end of thread_info. Signed-off-by: Al Viro Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index f865031..e468125 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1235,6 +1235,17 @@ static inline void task_unlock(struct task_struct *p) #define task_thread_info(task) (task)->thread_info +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +static inline unsigned long *end_of_stack(struct task_struct *p) +{ + return (unsigned long *)(p->thread_info + 1); +} + /* set thread flags in other task's structures * - see asm/thread_info.h for TIF_xxxx flags available */ -- cgit v1.1 From f037360f2ed111fe89a8f5cb6ba351f4e9934e53 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 13 Nov 2005 16:06:57 -0800 Subject: [PATCH] m68k: thread_info header cleanup a) in smp_lock.h #include of sched.h and spinlock.h moved under #ifdef CONFIG_LOCK_KERNEL. b) interrupt.h now explicitly pulls sched.h (not via smp_lock.h from hardirq.h as it used to) c) in three more places we need changes to compensate for (a) - one place in arch/sparc needs string.h now, hardirq.h needs forward declaration of task_struct and preempt.h needs direct include of thread_info.h. d) thread_info-related helpers in sched.h and thread_info.h put under ifndef __HAVE_THREAD_FUNCTIONS. Obviously safe. 
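The setup_thread_stack()/end_of_stack() helpers introduced above are consumed from the core fork path; a simplified sketch of that call site (error unwinding omitted, names modelled on kernel/fork.c of this era -- the sketch is illustrative, not part of these header-only patches):

	static struct task_struct *dup_task_struct_sketch(struct task_struct *orig)
	{
		struct task_struct *tsk = alloc_task_struct();
		struct thread_info *ti;

		if (!tsk)
			return NULL;
		ti = alloc_thread_info(tsk);
		if (!ti)
			return NULL;		/* real code also frees tsk here */

		*tsk = *orig;
		tsk->thread_info = ti;
		setup_thread_stack(tsk, orig);	/* copies *ti and repoints ti->task at tsk */
		return tsk;
	}
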
Signed-off-by: Al Viro Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hardirq.h | 2 ++ include/linux/interrupt.h | 1 + include/linux/preempt.h | 1 + include/linux/sched.h | 4 ++++ include/linux/smp_lock.h | 3 +-- 5 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 5912874..71d2b8a 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -90,6 +90,8 @@ extern void synchronize_irq(unsigned int irq); #define nmi_enter() irq_enter() #define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) +struct task_struct; + #ifndef CONFIG_VIRT_CPU_ACCOUNTING static inline void account_user_vtime(struct task_struct *tsk) { diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0a90205..41f150a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/preempt.h b/include/linux/preempt.h index dd98c54..d9a2f52 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -7,6 +7,7 @@ */ #include +#include #include #ifdef CONFIG_DEBUG_PREEMPT diff --git a/include/linux/sched.h b/include/linux/sched.h index e468125..41df813 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1233,6 +1233,8 @@ static inline void task_unlock(struct task_struct *p) spin_unlock(&p->alloc_lock); } +#ifndef __HAVE_THREAD_FUNCTIONS + #define task_thread_info(task) (task)->thread_info static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) @@ -1246,6 +1248,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p) return (unsigned long *)(p->thread_info + 1); } +#endif + /* set thread flags in other task's structures * - see asm/thread_info.h for TIF_xxxx flags available */ diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index b63ce70..fa1ff3b 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -2,11 +2,10 @@ #define __LINUX_SMPLOCK_H #include +#ifdef CONFIG_LOCK_KERNEL #include #include -#ifdef CONFIG_LOCK_KERNEL - #define kernel_locked() (current->lock_depth >= 0) extern int __lockfunc __reacquire_kernel_lock(void); -- cgit v1.1 From abd03753bd1532c05eb13231569a5257b007e29c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 13 Nov 2005 16:06:58 -0800 Subject: [PATCH] m68k: m68k-specific thread_info changes a) added embedded thread_info [m68k processor.h] b) added missing symbols in asm-offsets.c c) task_thread_info() and friends in asm-m68k/thread_info.h d) made m68k thread_info.h included by m68k processor.h, not the other way round. 
Signed-off-by: Al Viro Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-m68k/processor.h | 2 ++ include/asm-m68k/thread_info.h | 14 ++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h index df1575d..84b4b26 100644 --- a/include/asm-m68k/processor.h +++ b/include/asm-m68k/processor.h @@ -14,6 +14,7 @@ #define current_text_addr() ({ __label__ _l; _l: &&_l;}) #include +#include #include #include #include @@ -79,6 +80,7 @@ struct thread_struct { unsigned long fpcntl[3]; /* fp control regs */ unsigned char fpstate[FPSTATESIZE]; /* floating point state */ struct task_work work; + struct thread_info info; }; #define INIT_THREAD { \ diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h index 2aed24f..4fdbf55 100644 --- a/include/asm-m68k/thread_info.h +++ b/include/asm-m68k/thread_info.h @@ -2,7 +2,6 @@ #define _ASM_M68K_THREAD_INFO_H #include -#include #include struct thread_info { @@ -35,14 +34,21 @@ struct thread_info { #define free_thread_info(ti) free_pages((unsigned long)(ti),1) #endif /* PAGE_SHIFT == 13 */ -//#define init_thread_info (init_task.thread.info) +#define init_thread_info (init_task.thread.info) #define init_stack (init_thread_union.stack) -#define current_thread_info() (current->thread_info) - +#define task_thread_info(tsk) (&(tsk)->thread.info) +#define current_thread_info() task_thread_info(current) #define __HAVE_THREAD_FUNCTIONS +#define setup_thread_stack(p, org) ({ \ + *(struct task_struct **)(p)->thread_info = (p); \ + task_thread_info(p)->task = (p); \ +}) + +#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1) + #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_DELAYED_TRACE 1 /* single step a syscall */ #define TIF_NOTIFY_RESUME 2 /* resumption notification requested */ -- cgit v1.1 From 3b66a1edb01b82269a668a478625765b1fa4936f Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sun, 13 Nov 2005 16:06:59 -0800 Subject: [PATCH] m68k: convert thread flags to use bit fields Remove task_work structure, use the standard thread flags functions and use shifts in entry.S to test the thread flags. Add a few local labels to entry.S to allow gas to generate short jumps. Finally it changes a number of inline functions in thread_info.h to macros to delay the current_thread_info() usage, which requires on m68k a structure (task_struct) not yet defined at this point. 
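A sketch of the ordering problem those macro conversions avoid (illustrative, mirroring the set_thread_flag() definition in the hunk below): when the generic thread_info.h is processed on m68k, task_struct is still an incomplete type, so an inline body could not expand current_thread_info() yet, whereas a macro body is only expanded later at each call site, where the full definition is visible.

	struct task_struct;		/* only a forward declaration at this point */

	#define set_thread_flag(flag) \
		set_ti_thread_flag(current_thread_info(), flag)

	/* the old inline form would need the complete task_struct right here:
	 *	static inline void set_thread_flag(int flag)
	 *	{ set_ti_thread_flag(current_thread_info(), flag); }
	 */
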
Signed-off-by: Roman Zippel Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-m68k/processor.h | 12 ------- include/asm-m68k/thread_info.h | 81 +++++------------------------------------- include/linux/thread_info.h | 47 +++++++----------------- 3 files changed, 22 insertions(+), 118 deletions(-) (limited to 'include') diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h index 84b4b26..7982285 100644 --- a/include/asm-m68k/processor.h +++ b/include/asm-m68k/processor.h @@ -56,17 +56,6 @@ static inline void wrusp(unsigned long usp) #endif #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr) -struct task_work { - unsigned char sigpending; - unsigned char notify_resume; /* request for notification on - userspace execution resumption */ - char need_resched; - unsigned char delayed_trace; /* single step a syscall */ - unsigned char syscall_trace; /* count of syscall interceptors */ - unsigned char memdie; /* task was selected to be killed */ - unsigned char pad[2]; -}; - struct thread_struct { unsigned long ksp; /* kernel stack pointer */ unsigned long usp; /* user stack pointer */ @@ -79,7 +68,6 @@ struct thread_struct { unsigned long fp[8*3]; unsigned long fpcntl[3]; /* fp control regs */ unsigned char fpstate[FPSTATESIZE]; /* floating point state */ - struct task_work work; struct thread_info info; }; diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h index 4fdbf55..9532ca3 100644 --- a/include/asm-m68k/thread_info.h +++ b/include/asm-m68k/thread_info.h @@ -6,12 +6,11 @@ struct thread_info { struct task_struct *task; /* main task structure */ + unsigned long flags; struct exec_domain *exec_domain; /* execution domain */ int preempt_count; /* 0 => preemptable, <0 => BUG */ __u32 cpu; /* should always be 0 on m68k */ struct restart_block restart_block; - - __u8 supervisor_stack[0]; }; #define PREEMPT_ACTIVE 0x4000000 @@ -49,76 +48,14 @@ struct thread_info { #define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1) -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_DELAYED_TRACE 1 /* single step a syscall */ -#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */ -#define TIF_SIGPENDING 3 /* signal pending */ -#define TIF_NEED_RESCHED 4 /* rescheduling necessary */ -#define TIF_MEMDIE 5 - -extern int thread_flag_fixme(void); - -/* - * flag set/clear/test wrappers - * - pass TIF_xxxx constants to these functions +/* entry.S relies on these definitions! 
+ * bits 0-7 are tested at every exception exit + * bits 8-15 are also tested at syscall exit */ - -#define __set_tsk_thread_flag(tsk, flag, val) ({ \ - switch (flag) { \ - case TIF_SIGPENDING: \ - tsk->thread.work.sigpending = val; \ - break; \ - case TIF_NEED_RESCHED: \ - tsk->thread.work.need_resched = val; \ - break; \ - case TIF_SYSCALL_TRACE: \ - tsk->thread.work.syscall_trace = val; \ - break; \ - case TIF_MEMDIE: \ - tsk->thread.work.memdie = val; \ - break; \ - default: \ - thread_flag_fixme(); \ - } \ -}) - -#define __get_tsk_thread_flag(tsk, flag) ({ \ - int ___res; \ - switch (flag) { \ - case TIF_SIGPENDING: \ - ___res = tsk->thread.work.sigpending; \ - break; \ - case TIF_NEED_RESCHED: \ - ___res = tsk->thread.work.need_resched; \ - break; \ - case TIF_SYSCALL_TRACE: \ - ___res = tsk->thread.work.syscall_trace;\ - break; \ - case TIF_MEMDIE: \ - ___res = tsk->thread.work.memdie;\ - break; \ - default: \ - ___res = thread_flag_fixme(); \ - } \ - ___res; \ -}) - -#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \ - int __res = __get_tsk_thread_flag(tsk, flag); \ - __set_tsk_thread_flag(tsk, flag, val); \ - __res; \ -}) - -#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0) -#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0) -#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0) -#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag) - -#define set_thread_flag(flag) set_tsk_thread_flag(current, flag) -#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag) -#define test_thread_flag(flag) test_tsk_thread_flag(current, flag) - -#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED) -#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED) +#define TIF_SIGPENDING 6 /* signal pending */ +#define TIF_NEED_RESCHED 7 /* rescheduling necessary */ +#define TIF_DELAYED_TRACE 14 /* single step a syscall */ +#define TIF_SYSCALL_TRACE 15 /* syscall trace active */ +#define TIF_MEMDIE 16 #endif /* _ASM_M68K_THREAD_INFO_H */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index d252f45..1c4eb41 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -27,31 +27,6 @@ extern long do_no_restart_syscall(struct restart_block *parm); * - pass TIF_xxxx constants to these functions */ -static inline void set_thread_flag(int flag) -{ - set_bit(flag,¤t_thread_info()->flags); -} - -static inline void clear_thread_flag(int flag) -{ - clear_bit(flag,¤t_thread_info()->flags); -} - -static inline int test_and_set_thread_flag(int flag) -{ - return test_and_set_bit(flag,¤t_thread_info()->flags); -} - -static inline int test_and_clear_thread_flag(int flag) -{ - return test_and_clear_bit(flag,¤t_thread_info()->flags); -} - -static inline int test_thread_flag(int flag) -{ - return test_bit(flag,¤t_thread_info()->flags); -} - static inline void set_ti_thread_flag(struct thread_info *ti, int flag) { set_bit(flag,&ti->flags); @@ -77,15 +52,19 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) return test_bit(flag,&ti->flags); } -static inline void set_need_resched(void) -{ - set_thread_flag(TIF_NEED_RESCHED); -} - -static inline void clear_need_resched(void) -{ - clear_thread_flag(TIF_NEED_RESCHED); -} +#define set_thread_flag(flag) \ + set_ti_thread_flag(current_thread_info(), flag) +#define clear_thread_flag(flag) \ + clear_ti_thread_flag(current_thread_info(), flag) +#define test_and_set_thread_flag(flag) \ + 
test_and_set_ti_thread_flag(current_thread_info(), flag) +#define test_and_clear_thread_flag(flag) \ + test_and_clear_ti_thread_flag(current_thread_info(), flag) +#define test_thread_flag(flag) \ + test_ti_thread_flag(current_thread_info(), flag) + +#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED) +#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED) #endif -- cgit v1.1 From 66341a905ef5b3e7aea65b5d9bd1b0361b0ccc61 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 13 Nov 2005 16:07:21 -0800 Subject: [PATCH] Shut up per_cpu_ptr() on UP Currently per_cpu_ptr() doesn't really do anything with 'cpu' in the UP case. This is problematic in the cases where this is the only place the variable is referenced: CC kernel/workqueue.o kernel/workqueue.c: In function `current_is_keventd': kernel/workqueue.c:460: warning: unused variable `cpu' Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 5451eb1..fb8d2d2 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -38,7 +38,7 @@ extern void free_percpu(const void *); #else /* CONFIG_SMP */ -#define per_cpu_ptr(ptr, cpu) (ptr) +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) static inline void *__alloc_percpu(size_t size, size_t align) { -- cgit v1.1 From 53e86b91b7ae66d4c2757195cbd42e00d9199cf2 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 13 Nov 2005 16:07:23 -0800 Subject: [PATCH] i386: generic cmpxchg - Make cmpxchg generally available on the i386 platform. - Provide emulation of cmpxchg suitable for uniprocessor if built and run on 386. From: Christoph Lameter - Cut down patch and small style changes. Signed-off-by: Nick Piggin Signed-off-by: Christoph Lameter Cc: "Paul E. McKenney" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-i386/system.h | 42 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 39 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 97d52ac..772f85d 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 +#define cmpxchg(ptr,o,n)\ + ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ + (unsigned long)(n),sizeof(*(ptr)))) +#endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) @@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, return old; } -#define cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) +#ifndef CONFIG_X86_CMPXCHG +/* + * Building a kernel capable running on 80386. It may be necessary to + * simulate the cmpxchg on the 80386 CPU. For that purpose we define + * a function for each of the sizes we support. 
+ */ +extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); +extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); +extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); + +static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + return cmpxchg_386_u8(ptr, old, new); + case 2: + return cmpxchg_386_u16(ptr, old, new); + case 4: + return cmpxchg_386_u32(ptr, old, new); + } + return old; +} + +#define cmpxchg(ptr,o,n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + if (likely(boot_cpu_data.x86 > 3)) \ + __ret = __cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + else \ + __ret = cmpxchg_386((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr))); \ + __ret; \ +}) #endif #ifdef CONFIG_X86_CMPXCHG64 -- cgit v1.1 From 4a6dae6d382e9edf3ff440b819e554ed706359bc Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 13 Nov 2005 16:07:24 -0800 Subject: [PATCH] atomic: cmpxchg Introduce an atomic_cmpxchg operation. Signed-off-by: Nick Piggin Cc: "Paul E. McKenney" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/atomic.h | 2 ++ include/asm-arm/atomic.h | 31 +++++++++++++++++++++++++++++++ include/asm-arm26/atomic.h | 14 ++++++++++++++ include/asm-cris/atomic.h | 13 +++++++++++++ include/asm-frv/atomic.h | 2 ++ include/asm-h8300/atomic.h | 13 +++++++++++++ include/asm-i386/atomic.h | 2 ++ include/asm-ia64/atomic.h | 2 ++ include/asm-m68k/atomic.h | 2 ++ include/asm-m68knommu/atomic.h | 2 ++ include/asm-mips/atomic.h | 2 ++ include/asm-parisc/atomic.h | 1 + include/asm-powerpc/atomic.h | 2 ++ include/asm-s390/atomic.h | 2 ++ include/asm-sh/atomic.h | 14 ++++++++++++++ include/asm-sh64/atomic.h | 14 ++++++++++++++ include/asm-sparc/atomic.h | 1 + include/asm-sparc64/atomic.h | 2 ++ include/asm-v850/atomic.h | 14 ++++++++++++++ include/asm-x86_64/atomic.h | 2 ++ include/asm-xtensa/atomic.h | 1 + 21 files changed, 138 insertions(+) (limited to 'include') diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h index 20ac3d9..a666080 100644 --- a/include/asm-alpha/atomic.h +++ b/include/asm-alpha/atomic.h @@ -177,6 +177,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) return result; } +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic64_dec_return(v) atomic64_sub_return(1,(v)) diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h index 2885972..8ab1689 100644 --- a/include/asm-arm/atomic.h +++ b/include/asm-arm/atomic.h @@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v) return result; } +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + u32 oldval, res; + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%2]\n" + "teq %1, %3\n" + "strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + return oldval; +} + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) { unsigned long tmp, tmp2; @@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v) return val; } +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + + return ret; +} + static inline void 
atomic_clear_mask(unsigned long mask, unsigned long *addr) { unsigned long flags; diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h index 4a88235..54b24ea 100644 --- a/include/asm-arm26/atomic.h +++ b/include/asm-arm26/atomic.h @@ -62,6 +62,20 @@ static inline int atomic_sub_return(int i, atomic_t *v) return val; } +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + + return ret; +} + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) { unsigned long flags; diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h index 8c2e783..45891f7d 100644 --- a/include/asm-cris/atomic.h +++ b/include/asm-cris/atomic.h @@ -123,6 +123,19 @@ static inline int atomic_inc_and_test(volatile atomic_t *v) return retval; } +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + cris_atomic_save(v, flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + cris_atomic_restore(v, flags); + return ret; +} + /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index e759684..55f06a0 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -414,4 +414,6 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); #endif +#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) + #endif /* _ASM_ATOMIC_H */ diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h index 7230f65..d504392 100644 --- a/include/asm-h8300/atomic.h +++ b/include/asm-h8300/atomic.h @@ -82,6 +82,19 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) return ret == 0; } +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + return ret; +} + static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) { __asm__ __volatile__("stc ccr,r1l\n\t" diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index 509720b..5ff698e 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -215,6 +215,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) return atomic_add_return(-i,v); } +#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) + #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 874a6f8..593d3da 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -88,6 +88,8 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) return new; } +#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) + #define atomic_add_return(i,v) \ ({ \ int __ia64_aar_i = (i); \ diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index 38f3043..b821975 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -139,6 +139,8 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); } +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + 
/* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h index a83631e..2fd33a5 100644 --- a/include/asm-m68knommu/atomic.h +++ b/include/asm-m68knommu/atomic.h @@ -128,6 +128,8 @@ static inline int atomic_sub_return(int i, atomic_t * v) return temp; } +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index 6202eb8..4fba0d0 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -287,6 +287,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) return result; } +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index 048a2c7..52c9a45 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h @@ -164,6 +164,7 @@ static __inline__ int atomic_read(const atomic_t *v) } /* exported interface */ +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v)))) diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 9c0b372..37205fa 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -164,6 +164,8 @@ static __inline__ int atomic_dec_return(atomic_t *v) return t; } +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index 9d86ba6..631014d 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -198,6 +198,8 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v) return retval; } +#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter))) + #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() #define smp_mb__before_atomic_inc() smp_mb() diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index 3c4f805d..a148c76 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -87,6 +87,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) #define atomic_inc(v) atomic_add(1,(v)) #define atomic_dec(v) atomic_sub(1,(v)) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + + return ret; +} + static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) { unsigned long flags; diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h index 8c3872d..6eeb57b 100644 --- a/include/asm-sh64/atomic.h +++ b/include/asm-sh64/atomic.h @@ -99,6 +99,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) #define atomic_inc(v) atomic_add(1,(v)) #define atomic_dec(v) atomic_sub(1,(v)) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long 
flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + + return ret; +} + static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) { unsigned long flags; diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h index 37f6ab6..52bdd1a 100644 --- a/include/asm-sparc/atomic.h +++ b/include/asm-sparc/atomic.h @@ -19,6 +19,7 @@ typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } extern int __atomic_add_return(int, atomic_t *); +extern int atomic_cmpxchg(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); #define atomic_read(v) ((v)->counter) diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index e175afc..3a0b438 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -70,6 +70,8 @@ extern int atomic64_sub_ret(int, atomic64_t *); #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) + /* Atomic operations are already serializing */ #ifdef CONFIG_SMP #define smp_mb__before_atomic_dec() membar_storeload_loadload(); diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h index 395268a..e497166 100644 --- a/include/asm-v850/atomic.h +++ b/include/asm-v850/atomic.h @@ -90,6 +90,20 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add #define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0) #define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + + return ret; +} + /* Atomic operations are already serializing on ARM */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index fc4c595..75c8a1e 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -360,6 +360,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) return atomic_add_return(-i,v); } +#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) + #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index 12b5732..cd40c5e 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -223,6 +223,7 @@ static inline int atomic_sub_return(int i, atomic_t * v) */ #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0) +#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { -- cgit v1.1 From 8426e1f6af0fd7f44d040af7263750c5a52f3cc3 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 13 Nov 2005 16:07:25 -0800 Subject: [PATCH] atomic: inc_not_zero Introduce an atomic_inc_not_zero operation. Make this a special case of atomic_add_unless because lockless pagecache actually wants atomic_inc_not_negativeone due to its offset refcount. Signed-off-by: Nick Piggin Cc: "Paul E. 
McKenney" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/atomic.h | 10 ++++++++++ include/asm-arm/atomic.h | 11 +++++++++++ include/asm-arm26/atomic.h | 15 +++++++++++++++ include/asm-cris/atomic.h | 14 ++++++++++++++ include/asm-frv/atomic.h | 10 ++++++++++ include/asm-h8300/atomic.h | 14 ++++++++++++++ include/asm-i386/atomic.h | 19 +++++++++++++++++++ include/asm-ia64/atomic.h | 10 ++++++++++ include/asm-m68k/atomic.h | 10 ++++++++++ include/asm-m68knommu/atomic.h | 10 ++++++++++ include/asm-mips/atomic.h | 19 +++++++++++++++++++ include/asm-parisc/atomic.h | 19 +++++++++++++++++++ include/asm-powerpc/atomic.h | 25 +++++++++++++++++++++++++ include/asm-s390/atomic.h | 10 ++++++++++ include/asm-sh/atomic.h | 15 +++++++++++++++ include/asm-sh64/atomic.h | 15 +++++++++++++++ include/asm-sparc/atomic.h | 3 +++ include/asm-sparc64/atomic.h | 10 ++++++++++ include/asm-v850/atomic.h | 16 ++++++++++++++++ include/asm-x86_64/atomic.h | 19 +++++++++++++++++++ include/asm-xtensa/atomic.h | 19 +++++++++++++++++++ 21 files changed, 293 insertions(+) (limited to 'include') diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h index a666080..36505bb 100644 --- a/include/asm-alpha/atomic.h +++ b/include/asm-alpha/atomic.h @@ -179,6 +179,16 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic64_dec_return(v) atomic64_sub_return(1,(v)) diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h index 8ab1689..75b8027 100644 --- a/include/asm-arm/atomic.h +++ b/include/asm-arm/atomic.h @@ -173,6 +173,17 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #endif /* __LINUX_ARM_ARCH__ */ +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_add(i, v) (void) atomic_add_return(i, v) #define atomic_inc(v) (void) atomic_add_return(1, v) #define atomic_sub(i, v) (void) atomic_sub_return(i, v) diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h index 54b24ea..a47cadc 100644 --- a/include/asm-arm26/atomic.h +++ b/include/asm-arm26/atomic.h @@ -76,6 +76,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) { unsigned long flags; diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h index 45891f7d..683b05a 100644 --- a/include/asm-cris/atomic.h +++ b/include/asm-cris/atomic.h @@ -136,6 +136,20 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + 
cris_atomic_save(v, flags); + ret = v->counter; + if (ret != u) + v->counter += a; + cris_atomic_restore(v, flags); + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index 55f06a0..f6539ff 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -416,4 +416,14 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new); #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #endif /* _ASM_ATOMIC_H */ diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h index d504392..f23d868 100644 --- a/include/asm-h8300/atomic.h +++ b/include/asm-h8300/atomic.h @@ -95,6 +95,20 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) { __asm__ __volatile__("stc ccr,r1l\n\t" diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index 5ff698e..c68557a 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -217,6 +217,25 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
+ */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 593d3da..2fbebf8 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -90,6 +90,16 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_add_return(i,v) \ ({ \ int __ia64_aar_i = (i); \ diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index b821975..e3c962e 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -141,6 +141,16 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h index 2fd33a5..3c1cc15 100644 --- a/include/asm-m68knommu/atomic.h +++ b/include/asm-m68knommu/atomic.h @@ -130,6 +130,16 @@ static inline int atomic_sub_return(int i, atomic_t * v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index 4fba0d0..2c87b41 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -289,6 +289,25 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
+ */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index 52c9a45..983e9a2 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h @@ -166,6 +166,25 @@ static __inline__ int atomic_read(const atomic_t *v) /* exported interface */ #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v)))) #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v)))) diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 37205fa..ec4b144 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -166,6 +166,31 @@ static __inline__ int atomic_dec_return(atomic_t *v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
+ */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ + c = old; \ + } \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index 631014d..b3bd4f6 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -200,6 +200,16 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v) #define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter))) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() #define smp_mb__before_atomic_inc() smp_mb() diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index a148c76..aabfd33 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -101,6 +101,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) { unsigned long flags; diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h index 6eeb57b..927a2bc 100644 --- a/include/asm-sh64/atomic.h +++ b/include/asm-sh64/atomic.h @@ -113,6 +113,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) { unsigned long flags; diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h index 52bdd1a..62bec7a 100644 --- a/include/asm-sparc/atomic.h +++ b/include/asm-sparc/atomic.h @@ -20,6 +20,7 @@ typedef struct { volatile int counter; } atomic_t; extern int __atomic_add_return(int, atomic_t *); extern int atomic_cmpxchg(atomic_t *, int, int); +extern int atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); #define atomic_read(v) ((v)->counter) @@ -49,6 +50,8 @@ extern void atomic_set(atomic_t *, int); #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + /* This is the old 24-bit implementation. It's still used internally * by some sparc-specific code, notably the semaphore implementation. 
*/ diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index 3a0b438..8198c3d 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -72,6 +72,16 @@ extern int atomic64_sub_ret(int, atomic64_t *); #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + /* Atomic operations are already serializing */ #ifdef CONFIG_SMP #define smp_mb__before_atomic_dec() membar_storeload_loadload(); diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h index e497166..bede317 100644 --- a/include/asm-v850/atomic.h +++ b/include/asm-v850/atomic.h @@ -104,6 +104,22 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + + return ret != u; +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + /* Atomic operations are already serializing on ARM */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 75c8a1e..0866ef6 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -362,6 +362,25 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index cd40c5e..3670cc7 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -225,6 +225,25 @@ static inline int atomic_sub_return(int i, atomic_t * v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
+ */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { unsigned int all_f = -1; -- cgit v1.1 From 20dcae32439384b6863c626bb3b2a09bed65b33e Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Sun, 13 Nov 2005 16:07:33 -0800 Subject: [PATCH] aio: remove kioctx from mm_struct Sync iocbs have a life cycle that don't need a kioctx. Their retrying, if any, is done in the context of their owner who has allocated them on the stack. The sole user of a sync iocb's ctx reference was aio_complete() checking for an elevated iocb ref count that could never happen. No path which grabs an iocb ref has access to sync iocbs. If we were to implement sync iocb cancelation it would be done by the owner of the iocb using its on-stack reference. Removing this chunk from aio_complete allows us to remove the entire kioctx instance from mm_struct, reducing its size by a third. On a i386 testing box the slab size went from 768 to 504 bytes and from 5 to 8 per page. Signed-off-by: Zach Brown Acked-by: Benjamin LaHaise Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/aio.h | 2 +- include/linux/init_task.h | 1 - include/linux/sched.h | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/aio.h b/include/linux/aio.h index 403d71d..9e0ae87 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -124,7 +124,7 @@ struct kiocb { (x)->ki_users = 1; \ (x)->ki_key = KIOCB_SYNC_KEY; \ (x)->ki_filp = (filp); \ - (x)->ki_ctx = &tsk->active_mm->default_kioctx; \ + (x)->ki_ctx = NULL; \ (x)->ki_cancel = NULL; \ (x)->ki_dtor = NULL; \ (x)->ki_obj.tsk = tsk; \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 68ab5f2..dcfd2ec 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -51,7 +51,6 @@ .page_table_lock = SPIN_LOCK_UNLOCKED, \ .mmlist = LIST_HEAD_INIT(name.mmlist), \ .cpu_vm_mask = CPU_MASK_ALL, \ - .default_kioctx = INIT_KIOCTX(name.default_kioctx, name), \ } #define INIT_SIGNALS(sig) { \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 41df813..2038bd2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -357,7 +357,6 @@ struct mm_struct { /* aio bits */ rwlock_t ioctx_list_lock; struct kioctx *ioctx_list; - struct kioctx default_kioctx; }; struct sighand_struct { -- cgit v1.1 From 5ef1c49f8f9f0d6b5b8d57bb4b66c605a3d65876 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Sun, 13 Nov 2005 16:07:35 -0800 Subject: [PATCH] aio: don't ref kioctx after decref in put_ioctx put_ioctx's refcount debugging was doing an atomic_read after dropping its reference when it wasn't the last ref, leaving a tiny race for another freeing thread to sneak into. This shifts the debugging before the ops, uses BUG_ON, and reformats the defines a little. 
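To make the ordering problem concrete, here is a simplified user-space sketch of the race being closed; it uses C11 atomics and free() in place of the kernel's atomic_t and __put_ioctx(), so the names and details are illustrative only, not the actual fs/aio.c code.

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ctx { atomic_int users; };

/* Racy pattern: the counter is inspected *after* our reference has been
 * dropped, so another thread can perform the final put and free the object
 * between the decrement and the debug read. */
static void put_ctx_racy(struct ctx *c)
{
        if (atomic_fetch_sub(&c->users, 1) == 1)        /* we held the last ref */
                free(c);
        else
                assert(atomic_load(&c->users) >= 0);    /* may touch freed memory */
}

/* Ordering after the fix: sanity-check while we still hold a reference,
 * then decrement exactly once. */
static void put_ctx_fixed(struct ctx *c)
{
        assert(atomic_load(&c->users) > 0);             /* debug check first */
        if (atomic_fetch_sub(&c->users, 1) == 1)
                free(c);
}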
Sadly, moving to inlines increased the code size but this change decreases the code size by a whole 9 bytes :) Signed-off-by: Zach Brown Cc: Benjamin LaHaise Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/aio.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/aio.h b/include/linux/aio.h index 9e0ae87..49fd376 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -210,8 +210,15 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id); int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, struct iocb *iocb)); -#define get_ioctx(kioctx) do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0) -#define put_ioctx(kioctx) do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0) +#define get_ioctx(kioctx) do { \ + BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \ + atomic_inc(&(kioctx)->users); \ +} while (0) +#define put_ioctx(kioctx) do { \ + BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \ + if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ + __put_ioctx(kioctx); \ +} while (0) #define in_aio() !is_sync_wait(current->io_wait) /* may be used for debugging */ -- cgit v1.1 From ff6ed4063da39e6a30ce904005e4ed17385e2739 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sun, 13 Nov 2005 16:07:38 -0800 Subject: [PATCH] acct.h needs jiffies.h allnoconfig: In file included from fs/super.c:28: include/linux/acct.h:173: warning: `TICK_NSEC' is not defined Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/acct.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/acct.h b/include/linux/acct.h index 93c5b3c..9a66401 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h @@ -16,6 +16,8 @@ #define _LINUX_ACCT_H #include +#include + #include #include -- cgit v1.1 From 3f39894d1b5c253b10fcb8fbbbcf65a330f6cdc7 Mon Sep 17 00:00:00 2001 From: George Anzinger Date: Sun, 13 Nov 2005 16:07:44 -0800 Subject: [PATCH] timespec: normalize off by one errors It would appear that the timespec normalize code has an off by one error. Found in three places. Thanks to Ben for spotting. Signed-off-by: George Anzinger Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/time.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/time.h b/include/linux/time.h index 8e83f4e..bfbe92d 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -101,7 +101,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); static inline void set_normalized_timespec (struct timespec *ts, time_t sec, long nsec) { - while (nsec > NSEC_PER_SEC) { + while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; ++sec; } -- cgit v1.1 From 800d3c6f90b61cc82b09db635b59c00b1c460728 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sun, 13 Nov 2005 16:07:48 -0800 Subject: [PATCH] v4l: (943) added secam l video standard - Added SECAM L' video standard - SECAM L' is a Secam variant that requires special config. This patch adds support on V4L core. Requires aditional patches on tuners to support. 
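The new standard is just another bit in the v4l2_std_id mask; the short sketch below shows how a driver or application would compose and test it. The two constant values mirror the videodev2.h hunk that follows (v4l2_std_id is a __u64 there); the surrounding harness is invented for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t v4l2_std_id;   /* stands in for the __u64 typedef in videodev2.h */

#define V4L2_STD_SECAM_L        ((v4l2_std_id)0x00400000)
#define V4L2_STD_SECAM_LC       ((v4l2_std_id)0x00800000)       /* new SECAM L' bit */

int main(void)
{
        /* A tuner that copes with both SECAM L and the new L' variant
         * advertises the union of the two bits. */
        v4l2_std_id supported = V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC;
        v4l2_std_id requested = V4L2_STD_SECAM_LC;

        if (requested & ~supported)
                printf("standard not supported by this tuner\n");
        else
                printf("SECAM L' selected\n");
        return 0;
}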
Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/videodev2.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index a114fff..1cded68 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -636,6 +636,7 @@ typedef __u64 v4l2_std_id; #define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000) #define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000) #define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000) +#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000) /* ATSC/HDTV */ #define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000) -- cgit v1.1 From 8069695c9e7da7ab7cd8ee749e8d5aa9e6e0660b Mon Sep 17 00:00:00 2001 From: Ricardo Cerqueira Date: Sun, 13 Nov 2005 16:07:49 -0800 Subject: [PATCH] v4l: (935) Moved common IR stuff to ir-common.c - The pinnacle handler & remote are common to saa7134 PCI boards and em28xx USB boards, so the keymap was moved to ir-common and the keyhandler is back to ir-kbd-i2c - request_module("ir-kbd-i2c") is no longer necessary at saa7134-core since saa7134.ko now depends on ir-kbd-i2c.ko to get the keyhandler Signed-off-by: Ricardo Cerqueira Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/media/ir-common.h | 1 + include/media/ir-kbd-i2c.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 0f1ba95..ad3e9bb 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -49,6 +49,7 @@ struct ir_input_state { extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE]; +extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE]; extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE]; diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h index 00fa57e..730f21e 100644 --- a/include/media/ir-kbd-i2c.h +++ b/include/media/ir-kbd-i2c.h @@ -19,4 +19,6 @@ struct IR_i2c { char phys[32]; int (*get_key)(struct IR_i2c*, u32*, u32*); }; + +int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw); #endif -- cgit v1.1 From 633323ffffae91c3f22a08e0185fbfd3fae2a825 Mon Sep 17 00:00:00 2001 From: Bill Pechter Date: Sun, 13 Nov 2005 16:07:50 -0800 Subject: [PATCH] v4l:: (936) Support for sabrent bt848 version Support for Sabrent bt848 version. Signed-off-by: Bill Pechter Signed-off-by: Nickolay V. 
Shmyrev Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/media/tuner.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/media/tuner.h b/include/media/tuner.h index 9184e53..faa0f8e 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -113,6 +113,7 @@ #define TUNER_PHILIPS_TD1316 67 #define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ +#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ #define NOTUNER 0 #define PAL 1 /* PAL_BG */ -- cgit v1.1 From b2f0648ffda862d53f04f0a05979f3fa530d63c9 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 13 Nov 2005 16:07:55 -0800 Subject: [PATCH] v4l: (945) adds a new include for internal v4l2 ioctls and api Adds a new include for internal V4L2 ioctls and API Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/media/v4l2-common.h | 110 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 include/media/v4l2-common.h (limited to 'include') diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h new file mode 100644 index 0000000..d3fd481 --- /dev/null +++ b/include/media/v4l2-common.h @@ -0,0 +1,110 @@ +/* + v4l2 common internal API header + + This header contains internal shared ioctl definitions for use by the + internal low-level v4l2 drivers. + Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal + define, + + Copyright (C) 2005 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef V4L2_COMMON_H_ +#define V4L2_COMMON_H_ + +/* VIDIOC_INT_AUDIO_CLOCK_FREQ */ +enum v4l2_audio_clock_freq { + V4L2_AUDCLK_32_KHZ = 32000, + V4L2_AUDCLK_441_KHZ = 44100, + V4L2_AUDCLK_48_KHZ = 48000, +}; + +/* VIDIOC_INT_G_REGISTER and VIDIOC_INT_S_REGISTER */ +struct v4l2_register { + u32 i2c_id; /* I2C driver ID of the I2C chip. 0 for the I2C adapter. */ + unsigned long reg; + u32 val; +}; + +/* VIDIOC_INT_DECODE_VBI_LINE */ +struct v4l2_decode_vbi_line { + u32 is_second_field; /* Set to 0 for the first (odd) field, + set to 1 for the second (even) field. */ + u8 *p; /* Pointer to the sliced VBI data from the decoder. + On exit points to the start of the payload. */ + u32 line; /* Line number of the sliced VBI data (1-23) */ + u32 type; /* VBI service type (V4L2_SLICED_*). 
0 if no service found */ +}; + +/* VIDIOC_INT_G_CHIP_IDENT: identifies the actual chip installed on the board */ +enum v4l2_chip_ident { + /* general idents: reserved range 0-49 */ + V4L2_IDENT_UNKNOWN = 0, + + /* module saa7115: reserved range 100-149 */ + V4L2_IDENT_SAA7114 = 104, + V4L2_IDENT_SAA7115 = 105, + + /* module saa7127: reserved range 150-199 */ + V4L2_IDENT_SAA7127 = 157, + V4L2_IDENT_SAA7129 = 159, + + /* module cx25840: reserved range 200-249 */ + V4L2_IDENT_CX25840 = 240, + V4L2_IDENT_CX25841 = 241, + V4L2_IDENT_CX25842 = 242, + V4L2_IDENT_CX25843 = 243, +}; + +/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ +#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register) +#define VIDIOC_INT_G_REGISTER _IOWR('d', 101, struct v4l2_register) + +/* Reset the I2C chip */ +#define VIDIOC_INT_RESET _IO ('d', 102) + +/* Set the frequency of the audio clock output. + Used to slave an audio processor to the video decoder, ensuring that audio + and video remain synchronized. */ +#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOR ('d', 103, enum v4l2_audio_clock_freq) + +/* Video decoders that support sliced VBI need to implement this ioctl. + Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI + data that was generated by the decoder. The driver then parses the sliced + VBI data and sets the other fields in the struct accordingly. The pointer p + is updated to point to the start of the payload which can be copied + verbatim into the data field of the v4l2_sliced_vbi_data struct. If no + valid VBI data was found, then the type field is set to 0 on return. */ +#define VIDIOC_INT_DECODE_VBI_LINE _IOWR('d', 104, struct v4l2_decode_vbi_line) + +/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is + filled with the data packets that should be output. Note that if you set + the line field to 0, then that VBI signal is disabled. */ +#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data) + +/* Used to obtain the sliced VBI packet from a readback register. Not all + video decoders support this. If no data is available because the readback + register contains invalid or erroneous data -EIO is returned. Note that + you must fill in the 'id' member and the 'field' member (to determine + whether CC data from the first or second field should be obtained). */ +#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data *) + +/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can + be made. */ +#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident *) + +#endif /* V4L2_COMMON_H_ */ -- cgit v1.1 From aeec46b97a7975fd983219177980c58ed4fd607c Mon Sep 17 00:00:00 2001 From: Martin Waitz Date: Sun, 13 Nov 2005 16:08:13 -0800 Subject: [PATCH] DocBook: allow to mark structure members private Many structures contain both an internal part and one which is part of the API to other modules. With this patch it is possible to only include these public members in the kernel documentation. 
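The usb.h hunk below converts "private," / "public," comments into the "private:" / "public:" markers that the documentation tooling now recognises. A minimal, made-up structure using the convention looks like this (the struct and field names are invented purely for illustration):

/**
 * struct widget - example of the private:/public: kernel-doc markers
 * @id:   identifier other modules may use
 * @name: human-readable name
 *
 * Only the members following the "public:" comment are meant to show up
 * in the generated documentation.
 */
struct widget {
        /* private: internal bookkeeping, omitted from the DocBook output */
        unsigned long refcount;
        void *core_data;

        /* public: fields documented for users of the API */
        int id;
        const char *name;
};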
Signed-off-by: Martin Waitz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/usb.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/usb.h b/include/linux/usb.h index 748d043..856d232 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -819,7 +819,7 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *); */ struct urb { - /* private, usb core and host controller only fields in the urb */ + /* private: usb core and host controller only fields in the urb */ struct kref kref; /* reference count of the URB */ spinlock_t lock; /* lock for the URB */ void *hcpriv; /* private data for host controller */ @@ -827,7 +827,7 @@ struct urb atomic_t use_count; /* concurrent submissions counter */ u8 reject; /* submissions will fail */ - /* public, documented fields in the urb that can be used by drivers */ + /* public: documented fields in the urb that can be used by drivers */ struct list_head urb_list; /* list head for use by the urb's * current owner */ struct usb_device *dev; /* (in) pointer to associated device */ @@ -1045,7 +1045,7 @@ struct usb_sg_request { size_t bytes; /* - * members below are private to usbcore, + * members below are private: to usbcore, * and are not provided for driver access! */ spinlock_t lock; -- cgit v1.1 From 5cd16ee934eafca74a6bb790328950cec68a8b78 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 11 Nov 2005 14:25:24 +1100 Subject: [PATCH] powerpc: Merge page.h Merge asm-ppc/page.h and asm-ppc64/page.h into asm-powerpc/page.h, asm-powerpc/page_32.h and asm-powerpc/page_64.h Built for PPC (common_defconfig), with ARCH=powerpc, mostly built with ARCH=ppc (other things break the build). Built and booted on P5 LPAR for PPC64 with ARCH=ppc/powerpc (pseries_defconfig). Mostly built for iSeries powerpc. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- include/asm-powerpc/page.h | 179 ++++++++++++++++++++++++++++++++++++++++++ include/asm-powerpc/page_32.h | 38 +++++++++ include/asm-powerpc/page_64.h | 174 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 391 insertions(+) create mode 100644 include/asm-powerpc/page.h create mode 100644 include/asm-powerpc/page_32.h create mode 100644 include/asm-powerpc/page_64.h (limited to 'include') diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h new file mode 100644 index 0000000..18c1e5e --- /dev/null +++ b/include/asm-powerpc/page.h @@ -0,0 +1,179 @@ +#ifndef _ASM_POWERPC_PAGE_H +#define _ASM_POWERPC_PAGE_H + +/* + * Copyright (C) 2001,2005 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifdef __KERNEL__ +#include +#include + +/* + * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software + * page size. When using 64K pages however, whether we are really supporting + * 64K pages in HW or not is irrelevant to those definitions. + */ +#ifdef CONFIG_PPC_64K_PAGES +#define PAGE_SHIFT 16 +#else +#define PAGE_SHIFT 12 +#endif + +#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) + +/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ +#define __HAVE_ARCH_GATE_AREA 1 + +/* + * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. 
So if we + * assign PAGE_MASK to a larger type it gets extended the way we want + * (i.e. with 1s in the high bits) + */ +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) + +#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START) +#define KERNELBASE PAGE_OFFSET + +#ifdef CONFIG_DISCONTIGMEM +#define page_to_pfn(page) discontigmem_page_to_pfn(page) +#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn) +#define pfn_valid(pfn) discontigmem_pfn_valid(pfn) +#endif + +#ifdef CONFIG_FLATMEM +#define pfn_to_page(pfn) (mem_map + (pfn)) +#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE)) +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) + +/* + * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, + * and needs to be executable. This means the whole heap ends + * up being executable. + */ +#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifdef __powerpc64__ +#include +#else +#include +#endif + +/* align addr on a size boundary - adjust address up/down if needed */ +#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1))) +#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1))) + +/* align addr on a size boundary - adjust address up if needed */ +#define _ALIGN(addr,size) _ALIGN_UP(addr,size) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) + +#ifndef __ASSEMBLY__ + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* These are used to make use of C type-checking. */ + +/* PTE level */ +typedef struct { pte_basic_t pte; } pte_t; +#define pte_val(x) ((x).pte) +#define __pte(x) ((pte_t) { (x) }) + +/* 64k pages additionally define a bigger "real PTE" type that gathers + * the "second half" part of the PTE for pseudo 64k pages + */ +#ifdef CONFIG_PPC_64K_PAGES +typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; +#else +typedef struct { pte_t pte; } real_pte_t; +#endif + +/* PMD level */ +typedef struct { unsigned long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) }) + +/* PUD level exusts only on 4k pages */ +#ifndef CONFIG_PPC_64K_PAGES +typedef struct { unsigned long pud; } pud_t; +#define pud_val(x) ((x).pud) +#define __pud(x) ((pud_t) { (x) }) +#endif + +/* PGD level */ +typedef struct { unsigned long pgd; } pgd_t; +#define pgd_val(x) ((x).pgd) +#define __pgd(x) ((pgd_t) { (x) }) + +/* Page protection bits */ +typedef struct { unsigned long pgprot; } pgprot_t; +#define pgprot_val(x) ((x).pgprot) +#define __pgprot(x) ((pgprot_t) { (x) }) + +#else + +/* + * .. 
while these make it easier on the compiler + */ + +typedef pte_basic_t pte_t; +#define pte_val(x) (x) +#define __pte(x) (x) + +#ifdef CONFIG_PPC_64K_PAGES +typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; +#else +typedef unsigned long real_pte_t; +#endif + + +typedef unsigned long pmd_t; +#define pmd_val(x) (x) +#define __pmd(x) (x) + +#ifndef CONFIG_PPC_64K_PAGES +typedef unsigned long pud_t; +#define pud_val(x) (x) +#define __pud(x) (x) +#endif + +typedef unsigned long pgd_t; +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +typedef unsigned long pgprot_t; +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif + +struct page; +extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); +extern void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *p); +extern int page_is_ram(unsigned long pfn); + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_POWERPC_PAGE_H */ diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h new file mode 100644 index 0000000..3522130 --- /dev/null +++ b/include/asm-powerpc/page_32.h @@ -0,0 +1,38 @@ +#ifndef _ASM_POWERPC_PAGE_32_H +#define _ASM_POWERPC_PAGE_32_H + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32 + +#ifndef __ASSEMBLY__ +/* + * The basic type of a PTE - 64 bits for those CPUs with > 32 bit + * physical addressing. For now this just the IBM PPC440. + */ +#ifdef CONFIG_PTE_64BIT +typedef unsigned long long pte_basic_t; +#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */ +#define PTE_FMT "%16Lx" +#else +typedef unsigned long pte_basic_t; +#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ +#define PTE_FMT "%.8lx" +#endif + +struct page; +extern void clear_pages(void *page, int order); +static inline void clear_page(void *page) { clear_pages(page, 0); } +extern void copy_page(void *to, void *from); + +/* Pure 2^n version of get_order */ +extern __inline__ int get_order(unsigned long size) +{ + int lz; + + size = (size-1) >> PAGE_SHIFT; + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size)); + return 32 - lz; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_PAGE_32_H */ diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h new file mode 100644 index 0000000..c16f106 --- /dev/null +++ b/include/asm-powerpc/page_64.h @@ -0,0 +1,174 @@ +#ifndef _ASM_POWERPC_PAGE_64_H +#define _ASM_POWERPC_PAGE_64_H + +/* + * Copyright (C) 2001 PPC64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux + * specific, every notion of page number shared with the firmware, TCEs, + * iommu, etc... still uses a page size of 4K. + */ +#define HW_PAGE_SHIFT 12 +#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT) +#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1)) + +/* + * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and + * HW_PAGE_SHIFT, that is 4K pages. 
+ */ +#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT) + +#define REGION_SIZE 4UL +#define REGION_SHIFT 60UL +#define REGION_MASK (((1UL<> REGION_SHIFT) +#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT) +#define USER_REGION_ID (0UL) +#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) + +/* Segment size */ +#define SID_SHIFT 28 +#define SID_MASK 0xfffffffffUL +#define ESID_MASK 0xfffffffff0000000UL +#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) + +#ifndef __ASSEMBLY__ +#include + +typedef unsigned long pte_basic_t; + +static __inline__ void clear_page(void *addr) +{ + unsigned long lines, line_size; + + line_size = ppc64_caches.dline_size; + lines = ppc64_caches.dlines_per_page; + + __asm__ __volatile__( + "mtctr %1 # clear_page\n\ +1: dcbz 0,%0\n\ + add %0,%0,%3\n\ + bdnz+ 1b" + : "=r" (addr) + : "r" (lines), "0" (addr), "r" (line_size) + : "ctr", "memory"); +} + +extern void copy_4K_page(void *to, void *from); + +#ifdef CONFIG_PPC_64K_PAGES +static inline void copy_page(void *to, void *from) +{ + unsigned int i; + for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) { + copy_4K_page(to, from); + to += 4096; + from += 4096; + } +} +#else /* CONFIG_PPC_64K_PAGES */ +static inline void copy_page(void *to, void *from) +{ + copy_4K_page(to, from); +} +#endif /* CONFIG_PPC_64K_PAGES */ + +/* Log 2 of page table size */ +extern u64 ppc64_pft_size; + +/* Large pages size */ +extern unsigned int HPAGE_SHIFT; +#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_HUGETLB_PAGE + +#define HTLB_AREA_SHIFT 40 +#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) +#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) + +#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ + - (1U << GET_ESID(addr))) & 0xffff) +#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ + - (1U << GET_HTLB_AREA(addr))) & 0xffff) + +#define ARCH_HAS_HUGEPAGE_ONLY_RANGE +#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE +#define ARCH_HAS_SETCLEAR_HUGE_PTE + +#define touches_hugepage_low_range(mm, addr, len) \ + (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas) +#define touches_hugepage_high_range(mm, addr, len) \ + (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas) + +#define __within_hugepage_low_range(addr, len, segmask) \ + ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)) +#define within_hugepage_low_range(addr, len) \ + __within_hugepage_low_range((addr), (len), \ + current->mm->context.low_htlb_areas) +#define __within_hugepage_high_range(addr, len, zonemask) \ + ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)) +#define within_hugepage_high_range(addr, len) \ + __within_hugepage_high_range((addr), (len), \ + current->mm->context.high_htlb_areas) + +#define is_hugepage_only_range(mm, addr, len) \ + (touches_hugepage_high_range((mm), (addr), (len)) || \ + touches_hugepage_low_range((mm), (addr), (len))) +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + +#define in_hugepage_area(context, addr) \ + (cpu_has_feature(CPU_FTR_16M_PAGE) && \ + ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \ + ( ((addr) < 0x100000000L) && \ + ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) ) + +#else /* !CONFIG_HUGETLB_PAGE */ + +#define in_hugepage_area(mm, addr) 0 + +#endif /* !CONFIG_HUGETLB_PAGE */ + +#ifdef MODULE +#define __page_aligned __attribute__((__aligned__(PAGE_SIZE))) +#else +#define __page_aligned \ + 
__attribute__((__aligned__(PAGE_SIZE), \ + __section__(".data.page_aligned"))) +#endif + +#define VM_DATA_DEFAULT_FLAGS \ + (test_thread_flag(TIF_32BIT) ? \ + VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) + +/* + * This is the default if a program doesn't have a PT_GNU_STACK + * program header entry. The PPC64 ELF ABI has a non executable stack + * stack by default, so in the absense of a PT_GNU_STACK program header + * we turn execute permission off. + */ +#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_STACK_DEFAULT_FLAGS \ + (test_thread_flag(TIF_32BIT) ? \ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) + +#include + +#endif /* _ASM_POWERPC_PAGE_64_H */ -- cgit v1.1 From c5e24354efae9f962e0e369d875d45f47e0bb9aa Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 12 Nov 2005 00:06:05 +1100 Subject: [PATCH] powerpc: Turn cpu_irq_down into kexec_cpu_down We currently have a ppc_md member called cpu_irq_down, which disables IRQs for the cpu in question. The only caller of cpu_irq_down is the kexec code. On pSeries we need to do more than just teardown IRQs at kexec time, so rename the ppc_md member to kexec_cpu_down and expand it. The pSeries code needs to know, and other platforms might too, whether we're doing a crash shutdown (ie. panicking) or a regular kexec, so add a flag for that. The pSeries implementation of kexec_cpu_down does an unregister VPA call, which tells the Hypervisor to stop writing stuff into our pacas. Without this we can get weird memory corruption bugs when we kexec, caused by the Hypervisor writing into the first kernel's pacas which happens to be somewhere interesting in the second kernel's memory. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- include/asm-powerpc/machdep.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 5670f0c..c011abb 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -93,7 +93,9 @@ struct machdep_calls { void (*init_IRQ)(void); int (*get_irq)(struct pt_regs *); - void (*cpu_irq_down)(int secondary); +#ifdef CONFIG_KEXEC + void (*kexec_cpu_down)(int crash_shutdown, int secondary); +#endif /* PCI stuff */ /* Called after scanning the bus, before allocating resources */ -- cgit v1.1 From 593e537b93193d1696809817533ce5ad510445b1 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 12 Nov 2005 00:06:06 +1100 Subject: [PATCH] powerpc: Export htab start/end via device tree The userspace kexec-tools need to know the location of the htab on non-lpar machines, as well as the end of the kernel. Export via the device tree. NB. This patch has been updated to use "linux,x" property names. You may need to update your kexec-tools to match. 
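The kexec.h hunk below only adds the kexec_setup() declaration; the exported values themselves land in the device tree, which userspace sees under /proc/device-tree. As a hedged sketch of how a tool such as kexec-tools could consume one of these cells — the node and property name used here ("/chosen/linux,htab-base") are assumptions for illustration, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Read one big-endian 64-bit device-tree cell exposed via procfs. */
static int read_dt_u64(const char *path, uint64_t *val)
{
        unsigned char b[8];
        FILE *f = fopen(path, "rb");
        int i;

        if (!f)
                return -1;
        if (fread(b, 1, sizeof(b), f) != sizeof(b)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        *val = 0;
        for (i = 0; i < 8; i++)         /* device-tree cells are big-endian */
                *val = (*val << 8) | b[i];
        return 0;
}

int main(void)
{
        uint64_t htab_base;

        /* Assumed path; the real property names are defined by the kernel
         * side of this change and the matching kexec-tools update. */
        if (read_dt_u64("/proc/device-tree/chosen/linux,htab-base", &htab_base) == 0)
                printf("htab base: 0x%llx\n", (unsigned long long)htab_base);
        return 0;
}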
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- include/asm-powerpc/kexec.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h index 062ab9b..c72ffc7 100644 --- a/include/asm-powerpc/kexec.h +++ b/include/asm-powerpc/kexec.h @@ -40,6 +40,7 @@ extern note_buf_t crash_notes[]; #ifdef __powerpc64__ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ +extern void __init kexec_setup(void); #else struct kimage; extern void machine_kexec_simple(struct kimage *image); -- cgit v1.1 From 0c37ec2aa88bd8a6aaeb284ff5c86f4c6d8e8469 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 14 Nov 2005 14:55:58 +1100 Subject: [PATCH] powerpc: vdso fixes (take #2) This fixes various errors in the new functions added in the vDSO's, I've now verified all functions on both 32 and 64 bits vDSOs. It also fix a sign extension bug getting the initial time of day at boot that could cause the monotonic clock value to be completely on bogus for 64 bits applications (with either the vDSO or the syscall) on powermacs. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/vdso_datapage.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h index fc323b5..411832d 100644 --- a/include/asm-powerpc/vdso_datapage.h +++ b/include/asm-powerpc/vdso_datapage.h @@ -73,7 +73,7 @@ struct vdso_data { /* those additional ones don't have to be located anywhere * special as they were not part of the original systemcfg */ - __s64 wtom_clock_sec; /* Wall to monotonic clock */ + __s32 wtom_clock_sec; /* Wall to monotonic clock */ __s32 wtom_clock_nsec; __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ -- cgit v1.1 From c55377ee73f6efeb373ae06f6e918d87660b4852 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 14 Nov 2005 17:22:01 +1100 Subject: powerpc: Move a bunch of ppc64 headers to include/asm-powerpc ... and also delete some that are no longer used because we already had an include/asm-powerpc version of the header. 
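Among the headers created below is asm-powerpc/delay.h, whose __delay()/udelay() simply spin until the CPU timebase has advanced far enough. A rough user-space analogue of that calibrated-spin idea, with clock_gettime() standing in for the mftb instruction (illustration only, not kernel code):

#include <stdint.h>
#include <time.h>

/* Monotonic "timebase" substitute for the sketch. */
static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Spin until the requested number of microseconds has elapsed; the same
 * shape as __delay() looping while (__get_tb() - start) < loops. */
static void udelay_sketch(unsigned long usecs)
{
        uint64_t start = now_ns();

        while (now_ns() - start < (uint64_t)usecs * 1000ull)
                ;       /* busy-wait */
}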
Signed-off-by: Paul Mackerras --- include/asm-powerpc/btext.h | 27 +++ include/asm-powerpc/delay.h | 55 ++++++ include/asm-powerpc/eeh.h | 364 ++++++++++++++++++++++++++++++++++++++++ include/asm-powerpc/floppy.h | 105 ++++++++++++ include/asm-powerpc/hvconsole.h | 49 ++++++ include/asm-powerpc/hvcserver.h | 57 +++++++ include/asm-powerpc/nvram.h | 120 +++++++++++++ include/asm-powerpc/serial.h | 18 ++ include/asm-ppc/nvram.h | 73 -------- include/asm-ppc64/btext.h | 27 --- include/asm-ppc64/delay.h | 48 ------ include/asm-ppc64/eeh.h | 364 ---------------------------------------- include/asm-ppc64/floppy.h | 106 ------------ include/asm-ppc64/hvconsole.h | 49 ------ include/asm-ppc64/hvcserver.h | 57 ------- include/asm-ppc64/nvram.h | 116 ------------- include/asm-ppc64/prom.h | 220 ------------------------ include/asm-ppc64/serial.h | 23 --- include/asm-ppc64/system.h | 310 ---------------------------------- 19 files changed, 795 insertions(+), 1393 deletions(-) create mode 100644 include/asm-powerpc/btext.h create mode 100644 include/asm-powerpc/delay.h create mode 100644 include/asm-powerpc/eeh.h create mode 100644 include/asm-powerpc/floppy.h create mode 100644 include/asm-powerpc/hvconsole.h create mode 100644 include/asm-powerpc/hvcserver.h create mode 100644 include/asm-powerpc/nvram.h create mode 100644 include/asm-powerpc/serial.h delete mode 100644 include/asm-ppc/nvram.h delete mode 100644 include/asm-ppc64/btext.h delete mode 100644 include/asm-ppc64/delay.h delete mode 100644 include/asm-ppc64/eeh.h delete mode 100644 include/asm-ppc64/floppy.h delete mode 100644 include/asm-ppc64/hvconsole.h delete mode 100644 include/asm-ppc64/hvcserver.h delete mode 100644 include/asm-ppc64/nvram.h delete mode 100644 include/asm-ppc64/prom.h delete mode 100644 include/asm-ppc64/serial.h delete mode 100644 include/asm-ppc64/system.h (limited to 'include') diff --git a/include/asm-powerpc/btext.h b/include/asm-powerpc/btext.h new file mode 100644 index 0000000..71cce36 --- /dev/null +++ b/include/asm-powerpc/btext.h @@ -0,0 +1,27 @@ +/* + * Definitions for using the procedures in btext.c. + * + * Benjamin Herrenschmidt + */ +#ifndef __PPC_BTEXT_H +#define __PPC_BTEXT_H +#ifdef __KERNEL__ + +extern void btext_clearscreen(void); +extern void btext_flushscreen(void); + +extern int boot_text_mapped; + +extern int btext_initialize(struct device_node *np); + +extern void map_boot_text(void); +extern void init_boot_display(void); +extern void btext_update_display(unsigned long phys, int width, int height, + int depth, int pitch); + +extern void btext_drawchar(char c); +extern void btext_drawstring(const char *str); +extern void btext_drawhex(unsigned long v); + +#endif /* __KERNEL__ */ +#endif /* __PPC_BTEXT_H */ diff --git a/include/asm-powerpc/delay.h b/include/asm-powerpc/delay.h new file mode 100644 index 0000000..1492aa9 --- /dev/null +++ b/include/asm-powerpc/delay.h @@ -0,0 +1,55 @@ +#ifndef _ASM_POWERPC_DELAY_H +#define _ASM_POWERPC_DELAY_H + +/* + * Copyright 1996, Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan, + * Anton Blanchard. 
+ */ + +extern unsigned long tb_ticks_per_usec; + +#ifdef CONFIG_PPC64 +/* define these here to prevent circular dependencies */ +/* these instructions control the thread priority on multi-threaded cpus */ +#define __HMT_low() asm volatile("or 1,1,1") +#define __HMT_medium() asm volatile("or 2,2,2") +#else +#define __HMT_low() +#define __HMT_medium() +#endif + +#define __barrier() asm volatile("" ::: "memory") + +static inline unsigned long __get_tb(void) +{ + unsigned long rval; + + asm volatile("mftb %0" : "=r" (rval)); + return rval; +} + +static inline void __delay(unsigned long loops) +{ + unsigned long start = __get_tb(); + + while((__get_tb() - start) < loops) + __HMT_low(); + __HMT_medium(); + __barrier(); +} + +static inline void udelay(unsigned long usecs) +{ + unsigned long loops = tb_ticks_per_usec * usecs; + + __delay(loops); +} + +#endif /* _ASM_POWERPC_DELAY_H */ diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h new file mode 100644 index 0000000..89f26ab --- /dev/null +++ b/include/asm-powerpc/eeh.h @@ -0,0 +1,364 @@ +/* + * eeh.h + * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PPC64_EEH_H +#define _PPC64_EEH_H + +#include +#include +#include +#include + +struct pci_dev; +struct device_node; + +#ifdef CONFIG_EEH + +/* Values for eeh_mode bits in device_node */ +#define EEH_MODE_SUPPORTED (1<<0) +#define EEH_MODE_NOCHECK (1<<1) +#define EEH_MODE_ISOLATED (1<<2) + +/* Max number of EEH freezes allowed before we consider the device + * to be permanently disabled. */ +#define EEH_MAX_ALLOWED_FREEZES 5 + +void __init eeh_init(void); +unsigned long eeh_check_failure(const volatile void __iomem *token, + unsigned long val); +int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); +void __init pci_addr_cache_build(void); + +/** + * eeh_add_device_early + * eeh_add_device_late + * + * Perform eeh initialization for devices added after boot. + * Call eeh_add_device_early before doing any i/o to the + * device (including config space i/o). Call eeh_add_device_late + * to finish the eeh setup for this device. + */ +void eeh_add_device_early(struct device_node *); +void eeh_add_device_late(struct pci_dev *); + +/** + * eeh_remove_device - undo EEH setup for the indicated pci device + * @dev: pci device to be removed + * + * This routine should be called when a device is removed from + * a running system (e.g. by hotplug or dlpar). It unregisters + * the PCI device from the EEH subsystem. I/O errors affecting + * this device will no longer be detected after this call; thus, + * i/o errors affecting this slot may leave this device unusable. + */ +void eeh_remove_device(struct pci_dev *); + +/** + * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 
+ * + * If this macro yields TRUE, the caller relays to eeh_check_failure() + * which does further tests out of line. + */ +#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0) + +/* + * Reads from a device which has been isolated by EEH will return + * all 1s. This macro gives an all-1s value of the given size (in + * bytes: 1, 2, or 4) for comparing with the result of a read. + */ +#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) + +#else /* !CONFIG_EEH */ +static inline void eeh_init(void) { } + +static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) +{ + return val; +} + +static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) +{ + return 0; +} + +static inline void pci_addr_cache_build(void) { } + +static inline void eeh_add_device_early(struct device_node *dn) { } + +static inline void eeh_add_device_late(struct pci_dev *dev) { } + +static inline void eeh_remove_device(struct pci_dev *dev) { } + +#define EEH_POSSIBLE_ERROR(val, type) (0) +#define EEH_IO_ERROR_VALUE(size) (-1UL) +#endif /* CONFIG_EEH */ + +/* + * MMIO read/write operations with EEH support. + */ +static inline u8 eeh_readb(const volatile void __iomem *addr) +{ + u8 val = in_8(addr); + if (EEH_POSSIBLE_ERROR(val, u8)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_writeb(u8 val, volatile void __iomem *addr) +{ + out_8(addr, val); +} + +static inline u16 eeh_readw(const volatile void __iomem *addr) +{ + u16 val = in_le16(addr); + if (EEH_POSSIBLE_ERROR(val, u16)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_writew(u16 val, volatile void __iomem *addr) +{ + out_le16(addr, val); +} +static inline u16 eeh_raw_readw(const volatile void __iomem *addr) +{ + u16 val = in_be16(addr); + if (EEH_POSSIBLE_ERROR(val, u16)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) { + volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr; + out_be16(vaddr, val); +} + +static inline u32 eeh_readl(const volatile void __iomem *addr) +{ + u32 val = in_le32(addr); + if (EEH_POSSIBLE_ERROR(val, u32)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_writel(u32 val, volatile void __iomem *addr) +{ + out_le32(addr, val); +} +static inline u32 eeh_raw_readl(const volatile void __iomem *addr) +{ + u32 val = in_be32(addr); + if (EEH_POSSIBLE_ERROR(val, u32)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr) +{ + out_be32(addr, val); +} + +static inline u64 eeh_readq(const volatile void __iomem *addr) +{ + u64 val = in_le64(addr); + if (EEH_POSSIBLE_ERROR(val, u64)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_writeq(u64 val, volatile void __iomem *addr) +{ + out_le64(addr, val); +} +static inline u64 eeh_raw_readq(const volatile void __iomem *addr) +{ + u64 val = in_be64(addr); + if (EEH_POSSIBLE_ERROR(val, u64)) + return eeh_check_failure(addr, val); + return val; +} +static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr) +{ + out_be64(addr, val); +} + +#define EEH_CHECK_ALIGN(v,a) \ + ((((unsigned long)(v)) & ((a) - 1)) == 0) + +static inline void eeh_memset_io(volatile void __iomem *addr, int c, + unsigned long n) +{ + void *p = (void __force *)addr; + u32 lc = c; + lc |= lc << 8; + lc |= lc << 16; + + while(n && !EEH_CHECK_ALIGN(p, 4)) { + 
*((volatile u8 *)p) = c; + p++; + n--; + } + while(n >= 4) { + *((volatile u32 *)p) = lc; + p += 4; + n -= 4; + } + while(n) { + *((volatile u8 *)p) = c; + p++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); +} +static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src, + unsigned long n) +{ + void *vsrc = (void __force *) src; + void *destsave = dest; + unsigned long nsave = n; + + while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) { + *((u8 *)dest) = *((volatile u8 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc++; + dest++; + n--; + } + while(n > 4) { + *((u32 *)dest) = *((volatile u32 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc += 4; + dest += 4; + n -= 4; + } + while(n) { + *((u8 *)dest) = *((volatile u8 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc++; + dest++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); + + /* Look for ffff's here at dest[n]. Assume that at least 4 bytes + * were copied. Check all four bytes. + */ + if ((nsave >= 4) && + (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) { + eeh_check_failure(src, (*((u32 *) destsave+nsave-4))); + } +} + +static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src, + unsigned long n) +{ + void *vdest = (void __force *) dest; + + while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) { + *((volatile u8 *)vdest) = *((u8 *)src); + src++; + vdest++; + n--; + } + while(n > 4) { + *((volatile u32 *)vdest) = *((volatile u32 *)src); + src += 4; + vdest += 4; + n-=4; + } + while(n) { + *((volatile u8 *)vdest) = *((u8 *)src); + src++; + vdest++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); +} + +#undef EEH_CHECK_ALIGN + +static inline u8 eeh_inb(unsigned long port) +{ + u8 val; + if (!_IO_IS_VALID(port)) + return ~0; + val = in_8((u8 __iomem *)(port+pci_io_base)); + if (EEH_POSSIBLE_ERROR(val, u8)) + return eeh_check_failure((void __iomem *)(port), val); + return val; +} + +static inline void eeh_outb(u8 val, unsigned long port) +{ + if (_IO_IS_VALID(port)) + out_8((u8 __iomem *)(port+pci_io_base), val); +} + +static inline u16 eeh_inw(unsigned long port) +{ + u16 val; + if (!_IO_IS_VALID(port)) + return ~0; + val = in_le16((u16 __iomem *)(port+pci_io_base)); + if (EEH_POSSIBLE_ERROR(val, u16)) + return eeh_check_failure((void __iomem *)(port), val); + return val; +} + +static inline void eeh_outw(u16 val, unsigned long port) +{ + if (_IO_IS_VALID(port)) + out_le16((u16 __iomem *)(port+pci_io_base), val); +} + +static inline u32 eeh_inl(unsigned long port) +{ + u32 val; + if (!_IO_IS_VALID(port)) + return ~0; + val = in_le32((u32 __iomem *)(port+pci_io_base)); + if (EEH_POSSIBLE_ERROR(val, u32)) + return eeh_check_failure((void __iomem *)(port), val); + return val; +} + +static inline void eeh_outl(u32 val, unsigned long port) +{ + if (_IO_IS_VALID(port)) + out_le32((u32 __iomem *)(port+pci_io_base), val); +} + +/* in-string eeh macros */ +static inline void eeh_insb(unsigned long port, void * buf, int ns) +{ + _insb((u8 __iomem *)(port+pci_io_base), buf, ns); + if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8)) + eeh_check_failure((void __iomem *)(port), *(u8*)buf); +} + +static inline void eeh_insw_ns(unsigned long port, void * buf, int ns) +{ + _insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns); + if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16)) + eeh_check_failure((void __iomem *)(port), *(u16*)buf); +} + +static inline void eeh_insl_ns(unsigned long port, 
void * buf, int nl) +{ + _insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl); + if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32)) + eeh_check_failure((void __iomem *)(port), *(u32*)buf); +} + +#endif /* _PPC64_EEH_H */ diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h new file mode 100644 index 0000000..64276a3 --- /dev/null +++ b/include/asm-powerpc/floppy.h @@ -0,0 +1,105 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 + */ +#ifndef __ASM_POWERPC_FLOPPY_H +#define __ASM_POWERPC_FLOPPY_H + +#include +#include + +#define fd_inb(port) inb_p(port) +#define fd_outb(value,port) outb_p(value,port) + +#define fd_enable_dma() enable_dma(FLOPPY_DMA) +#define fd_disable_dma() disable_dma(FLOPPY_DMA) +#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy") +#define fd_free_dma() free_dma(FLOPPY_DMA) +#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) +#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode) +#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count) +#define fd_enable_irq() enable_irq(FLOPPY_IRQ) +#define fd_disable_irq() disable_irq(FLOPPY_IRQ) +#define fd_cacheflush(addr,size) /* nothing */ +#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \ + SA_INTERRUPT|SA_SAMPLE_RANDOM, \ + "floppy", NULL) +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); + +#ifdef CONFIG_PCI + +#include + +#define fd_dma_setup(addr,size,mode,io) powerpc_fd_dma_setup(addr,size,mode,io) + +static __inline__ int powerpc_fd_dma_setup(char *addr, unsigned long size, + int mode, int io) +{ + static unsigned long prev_size; + static dma_addr_t bus_addr = 0; + static char *prev_addr; + static int prev_dir; + int dir; + + dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; + + if (bus_addr + && (addr != prev_addr || size != prev_size || dir != prev_dir)) { + /* different from last time -- unmap prev */ + pci_unmap_single(NULL, bus_addr, prev_size, prev_dir); + bus_addr = 0; + } + + if (!bus_addr) /* need to map it */ + bus_addr = pci_map_single(NULL, addr, size, dir); + + /* remember this one as prev */ + prev_addr = addr; + prev_size = size; + prev_dir = dir; + + fd_clear_dma_ff(); + fd_cacheflush(addr, size); + fd_set_dma_mode(mode); + set_dma_addr(FLOPPY_DMA, bus_addr); + fd_set_dma_count(size); + virtual_dma_port = io; + fd_enable_dma(); + + return 0; +} + +#endif /* CONFIG_PCI */ + +__inline__ void virtual_dma_init(void) +{ + /* Nothing to do on PowerPC */ +} + +static int FDC1 = 0x3f0; +static int FDC2 = -1; + +/* + * Again, the CMOS information not available + */ +#define FLOPPY0_TYPE 6 +#define FLOPPY1_TYPE 0 + +#define N_FDC 2 /* Don't change this! */ +#define N_DRIVE 8 + +#define FLOPPY_MOTOR_MASK 0xf0 + +/* + * The PowerPC has no problems with floppy DMA crossing 64k borders. + */ +#define CROSS_64KB(a,s) (0) + +#define EXTRA_FLOPPY_PARAMS + +#endif /* __ASM_POWERPC_FLOPPY_H */ diff --git a/include/asm-powerpc/hvconsole.h b/include/asm-powerpc/hvconsole.h new file mode 100644 index 0000000..6da93ce --- /dev/null +++ b/include/asm-powerpc/hvconsole.h @@ -0,0 +1,49 @@ +/* + * hvconsole.h + * Copyright (C) 2004 Ryan S Arnold, IBM Corporation + * + * LPAR console support. 
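/*
 * Illustrative sketch, not part of the patch: the shape of a low-level hvc
 * backend for the interface declared below.  A driver fills in a struct hv_ops
 * with get_chars/put_chars callbacks and registers vterm 0 as a candidate boot
 * console with hvc_instantiate().  The sketch_* names are hypothetical and the
 * callbacks are stubs; a real backend would issue hypervisor calls here.
 * Assumes a kernel context with <linux/init.h> and <linux/types.h> available.
 */
static int sketch_get_chars(uint32_t vtermno, char *buf, int count)
{
	return 0;			/* no characters pending on this vterm */
}

static int sketch_put_chars(uint32_t vtermno, const char *buf, int count)
{
	return count;			/* pretend everything was accepted */
}

static struct hv_ops sketch_hv_ops = {
	.get_chars = sketch_get_chars,
	.put_chars = sketch_put_chars,
};

static int __init sketch_console_init(void)
{
	return hvc_instantiate(0, 0, &sketch_hv_ops);	/* vterm 0, console index 0 */
}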
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PPC64_HVCONSOLE_H +#define _PPC64_HVCONSOLE_H + +/* + * This is the max number of console adapters that can/will be found as + * console devices on first stage console init. Any number beyond this range + * can't be used as a console device but is still a valid tty device. + */ +#define MAX_NR_HVC_CONSOLES 16 + +/* implemented by a low level driver */ +struct hv_ops { + int (*get_chars)(uint32_t vtermno, char *buf, int count); + int (*put_chars)(uint32_t vtermno, const char *buf, int count); +}; +extern int hvc_get_chars(uint32_t vtermno, char *buf, int count); +extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count); + +struct hvc_struct; + +/* Register a vterm and a slot index for use as a console (console_init) */ +extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); +/* register a vterm for hvc tty operation (module_init or hotplug add) */ +extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, + struct hv_ops *ops); +/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ +extern int __devexit hvc_remove(struct hvc_struct *hp); +#endif /* _PPC64_HVCONSOLE_H */ diff --git a/include/asm-powerpc/hvcserver.h b/include/asm-powerpc/hvcserver.h new file mode 100644 index 0000000..aecba96 --- /dev/null +++ b/include/asm-powerpc/hvcserver.h @@ -0,0 +1,57 @@ +/* + * hvcserver.h + * Copyright (C) 2004 Ryan S Arnold, IBM Corporation + * + * PPC64 virtual I/O console server support. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PPC64_HVCSERVER_H +#define _PPC64_HVCSERVER_H + +#include + +/* Converged Location Code length */ +#define HVCS_CLC_LENGTH 79 + +/** + * hvcs_partner_info - an element in a list of partner info + * @node: list_head denoting this partner_info struct's position in the list of + * partner info. + * @unit_address: The partner unit address of this entry. + * @partition_ID: The partner partition ID of this entry. + * @location_code: The converged location code of this entry + 1 char for the + * null-term. 
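/*
 * Illustrative sketch, not part of the patch: the intended calling pattern for
 * the partner-info helpers declared below.  sketch_dump_partners() is a
 * hypothetical caller; pi_buff is a scratch buffer the caller provides, as the
 * hvcs_get_partner_info() signature requires.  Assumes a kernel context with
 * <linux/list.h> and <linux/kernel.h> available.
 */
static int sketch_dump_partners(uint32_t unit_address, unsigned long *pi_buff)
{
	LIST_HEAD(head);
	struct hvcs_partner_info *pi;
	int ret;

	ret = hvcs_get_partner_info(unit_address, &head, pi_buff);
	if (ret)
		return ret;

	list_for_each_entry(pi, &head, node)
		printk(KERN_INFO "partner %u/%u at %s\n",
			pi->partition_ID, pi->unit_address, pi->location_code);

	return hvcs_free_partner_info(&head);
}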
+ * + * This structure outlines the format that partner info is presented to a caller + * of the hvcs partner info fetching functions. These are strung together into + * a list using linux kernel lists. + */ +struct hvcs_partner_info { + struct list_head node; + uint32_t unit_address; + uint32_t partition_ID; + char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */ +}; + +extern int hvcs_free_partner_info(struct list_head *head); +extern int hvcs_get_partner_info(uint32_t unit_address, + struct list_head *head, unsigned long *pi_buff); +extern int hvcs_register_connection(uint32_t unit_address, + uint32_t p_partition_ID, uint32_t p_unit_address); +extern int hvcs_free_connection(uint32_t unit_address); + +#endif /* _PPC64_HVCSERVER_H */ diff --git a/include/asm-powerpc/nvram.h b/include/asm-powerpc/nvram.h new file mode 100644 index 0000000..1858244 --- /dev/null +++ b/include/asm-powerpc/nvram.h @@ -0,0 +1,120 @@ +/* + * NVRAM definitions and access functions. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_POWERPC_NVRAM_H +#define _ASM_POWERPC_NVRAM_H + +#define NVRW_CNT 0x20 +#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */ +#define NVRAM_BLOCK_LEN 16 +#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN) +#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN) + +#define NVRAM_AS0 0x74 +#define NVRAM_AS1 0x75 +#define NVRAM_DATA 0x77 + + +/* RTC Offsets */ + +#define MOTO_RTC_SECONDS 0x1FF9 +#define MOTO_RTC_MINUTES 0x1FFA +#define MOTO_RTC_HOURS 0x1FFB +#define MOTO_RTC_DAY_OF_WEEK 0x1FFC +#define MOTO_RTC_DAY_OF_MONTH 0x1FFD +#define MOTO_RTC_MONTH 0x1FFE +#define MOTO_RTC_YEAR 0x1FFF +#define MOTO_RTC_CONTROLA 0x1FF8 +#define MOTO_RTC_CONTROLB 0x1FF9 + +#define NVRAM_SIG_SP 0x02 /* support processor */ +#define NVRAM_SIG_OF 0x50 /* open firmware config */ +#define NVRAM_SIG_FW 0x51 /* general firmware */ +#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */ +#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */ +#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) 
*/ +#define NVRAM_SIG_SYS 0x70 /* system env vars */ +#define NVRAM_SIG_CFG 0x71 /* config data */ +#define NVRAM_SIG_ELOG 0x72 /* error log */ +#define NVRAM_SIG_VEND 0x7e /* vendor defined */ +#define NVRAM_SIG_FREE 0x7f /* Free space */ +#define NVRAM_SIG_OS 0xa0 /* OS defined */ +#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */ + +/* If change this size, then change the size of NVNAME_LEN */ +struct nvram_header { + unsigned char signature; + unsigned char checksum; + unsigned short length; + char name[12]; +}; + +struct nvram_partition { + struct list_head partition; + struct nvram_header header; + unsigned int index; +}; + + +extern int nvram_write_error_log(char * buff, int length, unsigned int err_type); +extern int nvram_read_error_log(char * buff, int length, unsigned int * err_type); +extern int nvram_clear_error_log(void); +extern struct nvram_partition *nvram_find_partition(int sig, const char *name); + +extern int pSeries_nvram_init(void); +extern int pmac_nvram_init(void); +extern int mmio_nvram_init(void); + +/* PowerMac specific nvram stuffs */ + +enum { + pmac_nvram_OF, /* Open Firmware partition */ + pmac_nvram_XPRAM, /* MacOS XPRAM partition */ + pmac_nvram_NR /* MacOS Name Registry partition */ +}; + +/* Return partition offset in nvram */ +extern int pmac_get_partition(int partition); + +/* Direct access to XPRAM on PowerMacs */ +extern u8 pmac_xpram_read(int xpaddr); +extern void pmac_xpram_write(int xpaddr, u8 data); + +/* Synchronize NVRAM */ +extern void nvram_sync(void); + +/* Normal access to NVRAM */ +extern unsigned char nvram_read_byte(int i); +extern void nvram_write_byte(unsigned char c, int i); + +/* Some offsets in XPRAM */ +#define PMAC_XPRAM_MACHINE_LOC 0xe4 +#define PMAC_XPRAM_SOUND_VOLUME 0x08 + +/* Machine location structure in PowerMac XPRAM */ +struct pmac_machine_location { + unsigned int latitude; /* 2+30 bit Fractional number */ + unsigned int longitude; /* 2+30 bit Fractional number */ + unsigned int delta; /* mix of GMT delta and DLS */ +}; + +/* + * /dev/nvram ioctls + * + * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is + * definitely obsolete. Do not use it if you can avoid it + */ + +#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \ + _IOWR('p', 0x40, int) + +#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */ +#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */ + +#endif /* _ASM_POWERPC_NVRAM_H */ diff --git a/include/asm-powerpc/serial.h b/include/asm-powerpc/serial.h new file mode 100644 index 0000000..b273d63 --- /dev/null +++ b/include/asm-powerpc/serial.h @@ -0,0 +1,18 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_POWERPC_SERIAL_H +#define _ASM_POWERPC_SERIAL_H + +/* + * Serial ports are not listed here, because they are discovered + * through the device tree. 
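/*
 * Illustrative note, not part of the patch: BASE_BAUD below is the classic
 * 1.8432 MHz UART clock divided by 16, i.e. 115200, so the usual 16550
 * divisor for a given rate is simply BASE_BAUD / rate.
 */
#include <assert.h>
int main(void)
{
	assert(1843200 / 16 == 115200);		/* BASE_BAUD */
	assert(115200 / 9600 == 12);		/* divisor programmed for 9600 baud */
	return 0;
}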
+ */ + +/* Default baud base if not found in device-tree */ +#define BASE_BAUD ( 1843200 / 16 ) + +#endif /* _PPC64_SERIAL_H */ diff --git a/include/asm-ppc/nvram.h b/include/asm-ppc/nvram.h deleted file mode 100644 index 31ef16e..0000000 --- a/include/asm-ppc/nvram.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * PreP compliant NVRAM access - */ - -#ifdef __KERNEL__ -#ifndef _PPC_NVRAM_H -#define _PPC_NVRAM_H - -#define NVRAM_AS0 0x74 -#define NVRAM_AS1 0x75 -#define NVRAM_DATA 0x77 - - -/* RTC Offsets */ - -#define MOTO_RTC_SECONDS 0x1FF9 -#define MOTO_RTC_MINUTES 0x1FFA -#define MOTO_RTC_HOURS 0x1FFB -#define MOTO_RTC_DAY_OF_WEEK 0x1FFC -#define MOTO_RTC_DAY_OF_MONTH 0x1FFD -#define MOTO_RTC_MONTH 0x1FFE -#define MOTO_RTC_YEAR 0x1FFF -#define MOTO_RTC_CONTROLA 0x1FF8 -#define MOTO_RTC_CONTROLB 0x1FF9 - -/* PowerMac specific nvram stuffs */ - -enum { - pmac_nvram_OF, /* Open Firmware partition */ - pmac_nvram_XPRAM, /* MacOS XPRAM partition */ - pmac_nvram_NR /* MacOS Name Registry partition */ -}; - -/* Return partition offset in nvram */ -extern int pmac_get_partition(int partition); - -/* Direct access to XPRAM on PowerMacs */ -extern u8 pmac_xpram_read(int xpaddr); -extern void pmac_xpram_write(int xpaddr, u8 data); - -/* Synchronize NVRAM */ -extern void nvram_sync(void); - -/* Normal access to NVRAM */ -extern unsigned char nvram_read_byte(int i); -extern void nvram_write_byte(unsigned char c, int i); - -/* Some offsets in XPRAM */ -#define PMAC_XPRAM_MACHINE_LOC 0xe4 -#define PMAC_XPRAM_SOUND_VOLUME 0x08 - -/* Machine location structure in PowerMac XPRAM */ -struct pmac_machine_location { - unsigned int latitude; /* 2+30 bit Fractional number */ - unsigned int longitude; /* 2+30 bit Fractional number */ - unsigned int delta; /* mix of GMT delta and DLS */ -}; - -/* - * /dev/nvram ioctls - * - * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is - * definitely obsolete. Do not use it if you can avoid it - */ - -#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \ - _IOWR('p', 0x40, int) - -#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */ -#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */ - -#endif -#endif /* __KERNEL__ */ diff --git a/include/asm-ppc64/btext.h b/include/asm-ppc64/btext.h deleted file mode 100644 index 71cce36..0000000 --- a/include/asm-ppc64/btext.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Definitions for using the procedures in btext.c. - * - * Benjamin Herrenschmidt - */ -#ifndef __PPC_BTEXT_H -#define __PPC_BTEXT_H -#ifdef __KERNEL__ - -extern void btext_clearscreen(void); -extern void btext_flushscreen(void); - -extern int boot_text_mapped; - -extern int btext_initialize(struct device_node *np); - -extern void map_boot_text(void); -extern void init_boot_display(void); -extern void btext_update_display(unsigned long phys, int width, int height, - int depth, int pitch); - -extern void btext_drawchar(char c); -extern void btext_drawstring(const char *str); -extern void btext_drawhex(unsigned long v); - -#endif /* __KERNEL__ */ -#endif /* __PPC_BTEXT_H */ diff --git a/include/asm-ppc64/delay.h b/include/asm-ppc64/delay.h deleted file mode 100644 index 05f198c..0000000 --- a/include/asm-ppc64/delay.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef _PPC64_DELAY_H -#define _PPC64_DELAY_H - -/* - * Copyright 1996, Paul Mackerras. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan, - * Anton Blanchard. - */ - -extern unsigned long tb_ticks_per_usec; - -/* define these here to prevent circular dependencies */ -#define __HMT_low() asm volatile("or 1,1,1") -#define __HMT_medium() asm volatile("or 2,2,2") -#define __barrier() asm volatile("":::"memory") - -static inline unsigned long __get_tb(void) -{ - unsigned long rval; - - asm volatile("mftb %0" : "=r" (rval)); - return rval; -} - -static inline void __delay(unsigned long loops) -{ - unsigned long start = __get_tb(); - - while((__get_tb()-start) < loops) - __HMT_low(); - __HMT_medium(); - __barrier(); -} - -static inline void udelay(unsigned long usecs) -{ - unsigned long loops = tb_ticks_per_usec * usecs; - - __delay(loops); -} - -#endif /* _PPC64_DELAY_H */ diff --git a/include/asm-ppc64/eeh.h b/include/asm-ppc64/eeh.h deleted file mode 100644 index 89f26ab..0000000 --- a/include/asm-ppc64/eeh.h +++ /dev/null @@ -1,364 +0,0 @@ -/* - * eeh.h - * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _PPC64_EEH_H -#define _PPC64_EEH_H - -#include -#include -#include -#include - -struct pci_dev; -struct device_node; - -#ifdef CONFIG_EEH - -/* Values for eeh_mode bits in device_node */ -#define EEH_MODE_SUPPORTED (1<<0) -#define EEH_MODE_NOCHECK (1<<1) -#define EEH_MODE_ISOLATED (1<<2) - -/* Max number of EEH freezes allowed before we consider the device - * to be permanently disabled. */ -#define EEH_MAX_ALLOWED_FREEZES 5 - -void __init eeh_init(void); -unsigned long eeh_check_failure(const volatile void __iomem *token, - unsigned long val); -int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); -void __init pci_addr_cache_build(void); - -/** - * eeh_add_device_early - * eeh_add_device_late - * - * Perform eeh initialization for devices added after boot. - * Call eeh_add_device_early before doing any i/o to the - * device (including config space i/o). Call eeh_add_device_late - * to finish the eeh setup for this device. - */ -void eeh_add_device_early(struct device_node *); -void eeh_add_device_late(struct pci_dev *); - -/** - * eeh_remove_device - undo EEH setup for the indicated pci device - * @dev: pci device to be removed - * - * This routine should be called when a device is removed from - * a running system (e.g. by hotplug or dlpar). It unregisters - * the PCI device from the EEH subsystem. 
I/O errors affecting - * this device will no longer be detected after this call; thus, - * i/o errors affecting this slot may leave this device unusable. - */ -void eeh_remove_device(struct pci_dev *); - -/** - * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. - * - * If this macro yields TRUE, the caller relays to eeh_check_failure() - * which does further tests out of line. - */ -#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0) - -/* - * Reads from a device which has been isolated by EEH will return - * all 1s. This macro gives an all-1s value of the given size (in - * bytes: 1, 2, or 4) for comparing with the result of a read. - */ -#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) - -#else /* !CONFIG_EEH */ -static inline void eeh_init(void) { } - -static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) -{ - return val; -} - -static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) -{ - return 0; -} - -static inline void pci_addr_cache_build(void) { } - -static inline void eeh_add_device_early(struct device_node *dn) { } - -static inline void eeh_add_device_late(struct pci_dev *dev) { } - -static inline void eeh_remove_device(struct pci_dev *dev) { } - -#define EEH_POSSIBLE_ERROR(val, type) (0) -#define EEH_IO_ERROR_VALUE(size) (-1UL) -#endif /* CONFIG_EEH */ - -/* - * MMIO read/write operations with EEH support. - */ -static inline u8 eeh_readb(const volatile void __iomem *addr) -{ - u8 val = in_8(addr); - if (EEH_POSSIBLE_ERROR(val, u8)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_writeb(u8 val, volatile void __iomem *addr) -{ - out_8(addr, val); -} - -static inline u16 eeh_readw(const volatile void __iomem *addr) -{ - u16 val = in_le16(addr); - if (EEH_POSSIBLE_ERROR(val, u16)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_writew(u16 val, volatile void __iomem *addr) -{ - out_le16(addr, val); -} -static inline u16 eeh_raw_readw(const volatile void __iomem *addr) -{ - u16 val = in_be16(addr); - if (EEH_POSSIBLE_ERROR(val, u16)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) { - volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr; - out_be16(vaddr, val); -} - -static inline u32 eeh_readl(const volatile void __iomem *addr) -{ - u32 val = in_le32(addr); - if (EEH_POSSIBLE_ERROR(val, u32)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_writel(u32 val, volatile void __iomem *addr) -{ - out_le32(addr, val); -} -static inline u32 eeh_raw_readl(const volatile void __iomem *addr) -{ - u32 val = in_be32(addr); - if (EEH_POSSIBLE_ERROR(val, u32)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr) -{ - out_be32(addr, val); -} - -static inline u64 eeh_readq(const volatile void __iomem *addr) -{ - u64 val = in_le64(addr); - if (EEH_POSSIBLE_ERROR(val, u64)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_writeq(u64 val, volatile void __iomem *addr) -{ - out_le64(addr, val); -} -static inline u64 eeh_raw_readq(const volatile void __iomem *addr) -{ - u64 val = in_be64(addr); - if (EEH_POSSIBLE_ERROR(val, u64)) - return eeh_check_failure(addr, val); - return val; -} -static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr) -{ - out_be64(addr, val); -} - -#define EEH_CHECK_ALIGN(v,a) \ - 
((((unsigned long)(v)) & ((a) - 1)) == 0) - -static inline void eeh_memset_io(volatile void __iomem *addr, int c, - unsigned long n) -{ - void *p = (void __force *)addr; - u32 lc = c; - lc |= lc << 8; - lc |= lc << 16; - - while(n && !EEH_CHECK_ALIGN(p, 4)) { - *((volatile u8 *)p) = c; - p++; - n--; - } - while(n >= 4) { - *((volatile u32 *)p) = lc; - p += 4; - n -= 4; - } - while(n) { - *((volatile u8 *)p) = c; - p++; - n--; - } - __asm__ __volatile__ ("sync" : : : "memory"); -} -static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src, - unsigned long n) -{ - void *vsrc = (void __force *) src; - void *destsave = dest; - unsigned long nsave = n; - - while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) { - *((u8 *)dest) = *((volatile u8 *)vsrc); - __asm__ __volatile__ ("eieio" : : : "memory"); - vsrc++; - dest++; - n--; - } - while(n > 4) { - *((u32 *)dest) = *((volatile u32 *)vsrc); - __asm__ __volatile__ ("eieio" : : : "memory"); - vsrc += 4; - dest += 4; - n -= 4; - } - while(n) { - *((u8 *)dest) = *((volatile u8 *)vsrc); - __asm__ __volatile__ ("eieio" : : : "memory"); - vsrc++; - dest++; - n--; - } - __asm__ __volatile__ ("sync" : : : "memory"); - - /* Look for ffff's here at dest[n]. Assume that at least 4 bytes - * were copied. Check all four bytes. - */ - if ((nsave >= 4) && - (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) { - eeh_check_failure(src, (*((u32 *) destsave+nsave-4))); - } -} - -static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src, - unsigned long n) -{ - void *vdest = (void __force *) dest; - - while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) { - *((volatile u8 *)vdest) = *((u8 *)src); - src++; - vdest++; - n--; - } - while(n > 4) { - *((volatile u32 *)vdest) = *((volatile u32 *)src); - src += 4; - vdest += 4; - n-=4; - } - while(n) { - *((volatile u8 *)vdest) = *((u8 *)src); - src++; - vdest++; - n--; - } - __asm__ __volatile__ ("sync" : : : "memory"); -} - -#undef EEH_CHECK_ALIGN - -static inline u8 eeh_inb(unsigned long port) -{ - u8 val; - if (!_IO_IS_VALID(port)) - return ~0; - val = in_8((u8 __iomem *)(port+pci_io_base)); - if (EEH_POSSIBLE_ERROR(val, u8)) - return eeh_check_failure((void __iomem *)(port), val); - return val; -} - -static inline void eeh_outb(u8 val, unsigned long port) -{ - if (_IO_IS_VALID(port)) - out_8((u8 __iomem *)(port+pci_io_base), val); -} - -static inline u16 eeh_inw(unsigned long port) -{ - u16 val; - if (!_IO_IS_VALID(port)) - return ~0; - val = in_le16((u16 __iomem *)(port+pci_io_base)); - if (EEH_POSSIBLE_ERROR(val, u16)) - return eeh_check_failure((void __iomem *)(port), val); - return val; -} - -static inline void eeh_outw(u16 val, unsigned long port) -{ - if (_IO_IS_VALID(port)) - out_le16((u16 __iomem *)(port+pci_io_base), val); -} - -static inline u32 eeh_inl(unsigned long port) -{ - u32 val; - if (!_IO_IS_VALID(port)) - return ~0; - val = in_le32((u32 __iomem *)(port+pci_io_base)); - if (EEH_POSSIBLE_ERROR(val, u32)) - return eeh_check_failure((void __iomem *)(port), val); - return val; -} - -static inline void eeh_outl(u32 val, unsigned long port) -{ - if (_IO_IS_VALID(port)) - out_le32((u32 __iomem *)(port+pci_io_base), val); -} - -/* in-string eeh macros */ -static inline void eeh_insb(unsigned long port, void * buf, int ns) -{ - _insb((u8 __iomem *)(port+pci_io_base), buf, ns); - if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8)) - eeh_check_failure((void __iomem *)(port), *(u8*)buf); -} - -static inline void 
eeh_insw_ns(unsigned long port, void * buf, int ns) -{ - _insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns); - if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16)) - eeh_check_failure((void __iomem *)(port), *(u16*)buf); -} - -static inline void eeh_insl_ns(unsigned long port, void * buf, int nl) -{ - _insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl); - if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32)) - eeh_check_failure((void __iomem *)(port), *(u32*)buf); -} - -#endif /* _PPC64_EEH_H */ diff --git a/include/asm-ppc64/floppy.h b/include/asm-ppc64/floppy.h deleted file mode 100644 index 5c497b5..0000000 --- a/include/asm-ppc64/floppy.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Architecture specific parts of the Floppy driver - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1995 - */ -#ifndef __ASM_PPC64_FLOPPY_H -#define __ASM_PPC64_FLOPPY_H - -#include -#include - -#define fd_inb(port) inb_p(port) -#define fd_outb(value,port) outb_p(value,port) - -#define fd_enable_dma() enable_dma(FLOPPY_DMA) -#define fd_disable_dma() disable_dma(FLOPPY_DMA) -#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy") -#define fd_free_dma() free_dma(FLOPPY_DMA) -#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) -#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode) -#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) -#define fd_enable_irq() enable_irq(FLOPPY_IRQ) -#define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_cacheflush(addr,size) /* nothing */ -#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \ - SA_INTERRUPT|SA_SAMPLE_RANDOM, \ - "floppy", NULL) -#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); - -#ifdef CONFIG_PCI - -#include - -#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io) - -static __inline__ int -ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io) -{ - static unsigned long prev_size; - static dma_addr_t bus_addr = 0; - static char *prev_addr; - static int prev_dir; - int dir; - - dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; - - if (bus_addr - && (addr != prev_addr || size != prev_size || dir != prev_dir)) { - /* different from last time -- unmap prev */ - pci_unmap_single(NULL, bus_addr, prev_size, prev_dir); - bus_addr = 0; - } - - if (!bus_addr) /* need to map it */ { - bus_addr = pci_map_single(NULL, addr, size, dir); - } - - /* remember this one as prev */ - prev_addr = addr; - prev_size = size; - prev_dir = dir; - - fd_clear_dma_ff(); - fd_cacheflush(addr, size); - fd_set_dma_mode(mode); - set_dma_addr(FLOPPY_DMA, bus_addr); - fd_set_dma_count(size); - virtual_dma_port = io; - fd_enable_dma(); - - return 0; -} - -#endif /* CONFIG_PCI */ - -__inline__ void virtual_dma_init(void) -{ - /* Nothing to do on PowerPC */ -} - -static int FDC1 = 0x3f0; -static int FDC2 = -1; - -/* - * Again, the CMOS information not available - */ -#define FLOPPY0_TYPE 6 -#define FLOPPY1_TYPE 0 - -#define N_FDC 2 /* Don't change this! */ -#define N_DRIVE 8 - -#define FLOPPY_MOTOR_MASK 0xf0 - -/* - * The PowerPC has no problems with floppy DMA crossing 64k borders. 
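/*
 * Illustrative sketch, not part of the patch: on ISA-DMA platforms the
 * CROSS_64KB() check defined just below typically tests whether a non-empty
 * buffer spans a 64 KiB physical boundary, roughly as sketched here; the DMA
 * engines used on ppc64 have no such restriction, hence the constant 0.
 * SKETCH_CROSS_64KB is a hypothetical name, not the x86 macro verbatim.
 */
#define SKETCH_CROSS_64KB(a, s) \
	(((((unsigned long)(a)) & 0xffffUL) + (s) - 1) > 0xffffUL)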
- */ -#define CROSS_64KB(a,s) (0) - -#define EXTRA_FLOPPY_PARAMS - -#endif /* __ASM_PPC64_FLOPPY_H */ diff --git a/include/asm-ppc64/hvconsole.h b/include/asm-ppc64/hvconsole.h deleted file mode 100644 index 6da93ce..0000000 --- a/include/asm-ppc64/hvconsole.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * hvconsole.h - * Copyright (C) 2004 Ryan S Arnold, IBM Corporation - * - * LPAR console support. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _PPC64_HVCONSOLE_H -#define _PPC64_HVCONSOLE_H - -/* - * This is the max number of console adapters that can/will be found as - * console devices on first stage console init. Any number beyond this range - * can't be used as a console device but is still a valid tty device. - */ -#define MAX_NR_HVC_CONSOLES 16 - -/* implemented by a low level driver */ -struct hv_ops { - int (*get_chars)(uint32_t vtermno, char *buf, int count); - int (*put_chars)(uint32_t vtermno, const char *buf, int count); -}; -extern int hvc_get_chars(uint32_t vtermno, char *buf, int count); -extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count); - -struct hvc_struct; - -/* Register a vterm and a slot index for use as a console (console_init) */ -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); -/* register a vterm for hvc tty operation (module_init or hotplug add) */ -extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, - struct hv_ops *ops); -/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ -extern int __devexit hvc_remove(struct hvc_struct *hp); -#endif /* _PPC64_HVCONSOLE_H */ diff --git a/include/asm-ppc64/hvcserver.h b/include/asm-ppc64/hvcserver.h deleted file mode 100644 index aecba96..0000000 --- a/include/asm-ppc64/hvcserver.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * hvcserver.h - * Copyright (C) 2004 Ryan S Arnold, IBM Corporation - * - * PPC64 virtual I/O console server support. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _PPC64_HVCSERVER_H -#define _PPC64_HVCSERVER_H - -#include - -/* Converged Location Code length */ -#define HVCS_CLC_LENGTH 79 - -/** - * hvcs_partner_info - an element in a list of partner info - * @node: list_head denoting this partner_info struct's position in the list of - * partner info. - * @unit_address: The partner unit address of this entry. - * @partition_ID: The partner partition ID of this entry. - * @location_code: The converged location code of this entry + 1 char for the - * null-term. - * - * This structure outlines the format that partner info is presented to a caller - * of the hvcs partner info fetching functions. These are strung together into - * a list using linux kernel lists. - */ -struct hvcs_partner_info { - struct list_head node; - uint32_t unit_address; - uint32_t partition_ID; - char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */ -}; - -extern int hvcs_free_partner_info(struct list_head *head); -extern int hvcs_get_partner_info(uint32_t unit_address, - struct list_head *head, unsigned long *pi_buff); -extern int hvcs_register_connection(uint32_t unit_address, - uint32_t p_partition_ID, uint32_t p_unit_address); -extern int hvcs_free_connection(uint32_t unit_address); - -#endif /* _PPC64_HVCSERVER_H */ diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h deleted file mode 100644 index def47d7..0000000 --- a/include/asm-ppc64/nvram.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * PreP compliant NVRAM access - * This needs to be updated for PPC64 - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _PPC64_NVRAM_H -#define _PPC64_NVRAM_H - -#define NVRW_CNT 0x20 -#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */ -#define NVRAM_BLOCK_LEN 16 -#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN) -#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN) - -#define NVRAM_AS0 0x74 -#define NVRAM_AS1 0x75 -#define NVRAM_DATA 0x77 - - -/* RTC Offsets */ - -#define MOTO_RTC_SECONDS 0x1FF9 -#define MOTO_RTC_MINUTES 0x1FFA -#define MOTO_RTC_HOURS 0x1FFB -#define MOTO_RTC_DAY_OF_WEEK 0x1FFC -#define MOTO_RTC_DAY_OF_MONTH 0x1FFD -#define MOTO_RTC_MONTH 0x1FFE -#define MOTO_RTC_YEAR 0x1FFF -#define MOTO_RTC_CONTROLA 0x1FF8 -#define MOTO_RTC_CONTROLB 0x1FF9 - -#define NVRAM_SIG_SP 0x02 /* support processor */ -#define NVRAM_SIG_OF 0x50 /* open firmware config */ -#define NVRAM_SIG_FW 0x51 /* general firmware */ -#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */ -#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */ -#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) 
*/ -#define NVRAM_SIG_SYS 0x70 /* system env vars */ -#define NVRAM_SIG_CFG 0x71 /* config data */ -#define NVRAM_SIG_ELOG 0x72 /* error log */ -#define NVRAM_SIG_VEND 0x7e /* vendor defined */ -#define NVRAM_SIG_FREE 0x7f /* Free space */ -#define NVRAM_SIG_OS 0xa0 /* OS defined */ -#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */ - -/* If change this size, then change the size of NVNAME_LEN */ -struct nvram_header { - unsigned char signature; - unsigned char checksum; - unsigned short length; - char name[12]; -}; - -struct nvram_partition { - struct list_head partition; - struct nvram_header header; - unsigned int index; -}; - - -extern int nvram_write_error_log(char * buff, int length, unsigned int err_type); -extern int nvram_read_error_log(char * buff, int length, unsigned int * err_type); -extern int nvram_clear_error_log(void); -extern struct nvram_partition *nvram_find_partition(int sig, const char *name); - -extern int pSeries_nvram_init(void); -extern int pmac_nvram_init(void); -extern int mmio_nvram_init(void); - -/* PowerMac specific nvram stuffs */ - -enum { - pmac_nvram_OF, /* Open Firmware partition */ - pmac_nvram_XPRAM, /* MacOS XPRAM partition */ - pmac_nvram_NR /* MacOS Name Registry partition */ -}; - -/* Return partition offset in nvram */ -extern int pmac_get_partition(int partition); - -/* Direct access to XPRAM on PowerMacs */ -extern u8 pmac_xpram_read(int xpaddr); -extern void pmac_xpram_write(int xpaddr, u8 data); - -/* Synchronize NVRAM */ -extern int nvram_sync(void); - -/* Some offsets in XPRAM */ -#define PMAC_XPRAM_MACHINE_LOC 0xe4 -#define PMAC_XPRAM_SOUND_VOLUME 0x08 - -/* Machine location structure in PowerMac XPRAM */ -struct pmac_machine_location { - unsigned int latitude; /* 2+30 bit Fractional number */ - unsigned int longitude; /* 2+30 bit Fractional number */ - unsigned int delta; /* mix of GMT delta and DLS */ -}; - -/* - * /dev/nvram ioctls - * - * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is - * definitely obsolete. Do not use it if you can avoid it - */ - -#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \ - _IOWR('p', 0x40, int) - -#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */ - -#endif /* _PPC64_NVRAM_H */ diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h deleted file mode 100644 index ddfe186..0000000 --- a/include/asm-ppc64/prom.h +++ /dev/null @@ -1,220 +0,0 @@ -#ifndef _PPC64_PROM_H -#define _PPC64_PROM_H - -/* - * Definitions for talking to the Open Firmware PROM on - * Power Macintosh computers. - * - * Copyright (C) 1996 Paul Mackerras. - * - * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include -#include -#include - -#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset)) -#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset)) -#define RELOC(x) (*PTRRELOC(&(x))) - -/* Definitions used by the flattened device tree */ -#define OF_DT_HEADER 0xd00dfeed /* marker */ -#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ -#define OF_DT_END_NODE 0x2 /* End node */ -#define OF_DT_PROP 0x3 /* Property: name off, size, - * content */ -#define OF_DT_NOP 0x4 /* nop */ -#define OF_DT_END 0x9 - -#define OF_DT_VERSION 0x10 - -/* - * This is what gets passed to the kernel by prom_init or kexec - * - * The dt struct contains the device tree structure, full pathes and - * property contents. The dt strings contain a separate block with just - * the strings for the property names, and is fully page aligned and - * self contained in a page, so that it can be kept around by the kernel, - * each property name appears only once in this page (cheap compression) - * - * the mem_rsvmap contains a map of reserved ranges of physical memory, - * passing it here instead of in the device-tree itself greatly simplifies - * the job of everybody. It's just a list of u64 pairs (base/size) that - * ends when size is 0 - */ -struct boot_param_header -{ - u32 magic; /* magic word OF_DT_HEADER */ - u32 totalsize; /* total size of DT block */ - u32 off_dt_struct; /* offset to structure */ - u32 off_dt_strings; /* offset to strings */ - u32 off_mem_rsvmap; /* offset to memory reserve map */ - u32 version; /* format version */ - u32 last_comp_version; /* last compatible version */ - /* version 2 fields below */ - u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ - /* version 3 fields below */ - u32 dt_strings_size; /* size of the DT strings block */ -}; - - - -typedef u32 phandle; -typedef u32 ihandle; - -struct address_range { - unsigned long space; - unsigned long address; - unsigned long size; -}; - -struct interrupt_info { - int line; - int sense; /* +ve/-ve logic, edge or level, etc. 
*/ -}; - -struct pci_address { - u32 a_hi; - u32 a_mid; - u32 a_lo; -}; - -struct isa_address { - u32 a_hi; - u32 a_lo; -}; - -struct isa_range { - struct isa_address isa_addr; - struct pci_address pci_addr; - unsigned int size; -}; - -struct reg_property { - unsigned long address; - unsigned long size; -}; - -struct reg_property32 { - unsigned int address; - unsigned int size; -}; - -struct reg_property64 { - unsigned long address; - unsigned long size; -}; - -struct property { - char *name; - int length; - unsigned char *value; - struct property *next; -}; - -struct device_node { - char *name; - char *type; - phandle node; - phandle linux_phandle; - int n_addrs; - struct address_range *addrs; - int n_intrs; - struct interrupt_info *intrs; - char *full_name; - - struct property *properties; - struct device_node *parent; - struct device_node *child; - struct device_node *sibling; - struct device_node *next; /* next device of same type */ - struct device_node *allnext; /* next in list of all nodes */ - struct proc_dir_entry *pde; /* this node's proc directory */ - struct kref kref; - unsigned long _flags; - void *data; -#ifdef CONFIG_PPC_ISERIES - struct list_head Device_List; -#endif -}; - -extern struct device_node *of_chosen; - -/* flag descriptions */ -#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ - -#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) -#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) - -/* - * Until 32-bit ppc can add proc_dir_entries to its device_node - * definition, we cannot refer to pde, name_link, and addr_link - * in arch-independent code. - */ -#define HAVE_ARCH_DEVTREE_FIXUPS - -static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) -{ - dn->pde = de; -} - - -/* OBSOLETE: Old stlye node lookup */ -extern struct device_node *find_devices(const char *name); -extern struct device_node *find_type_devices(const char *type); -extern struct device_node *find_path_device(const char *path); -extern struct device_node *find_compatible_devices(const char *type, - const char *compat); -extern struct device_node *find_all_nodes(void); - -/* New style node lookup */ -extern struct device_node *of_find_node_by_name(struct device_node *from, - const char *name); -extern struct device_node *of_find_node_by_type(struct device_node *from, - const char *type); -extern struct device_node *of_find_compatible_node(struct device_node *from, - const char *type, const char *compat); -extern struct device_node *of_find_node_by_path(const char *path); -extern struct device_node *of_find_node_by_phandle(phandle handle); -extern struct device_node *of_find_all_nodes(struct device_node *prev); -extern struct device_node *of_get_parent(const struct device_node *node); -extern struct device_node *of_get_next_child(const struct device_node *node, - struct device_node *prev); -extern struct device_node *of_node_get(struct device_node *node); -extern void of_node_put(struct device_node *node); - -/* For scanning the flat device-tree at boot time */ -int __init of_scan_flat_dt(int (*it)(unsigned long node, - const char *uname, int depth, - void *data), - void *data); -void* __init of_get_flat_dt_prop(unsigned long node, const char *name, - unsigned long *size); - -/* For updating the device tree at runtime */ -extern void of_attach_node(struct device_node *); -extern void of_detach_node(const struct device_node *); - -/* Other Prototypes */ -extern unsigned long prom_init(unsigned long, unsigned long, unsigned long, - 
unsigned long, unsigned long); -extern void finish_device_tree(void); -extern void unflatten_device_tree(void); -extern void early_init_devtree(void *); -extern int device_is_compatible(struct device_node *device, const char *); -extern int machine_is_compatible(const char *compat); -extern unsigned char *get_property(struct device_node *node, const char *name, - int *lenp); -extern void print_properties(struct device_node *node); -extern int prom_n_addr_cells(struct device_node* np); -extern int prom_n_size_cells(struct device_node* np); -extern int prom_n_intr_cells(struct device_node* np); -extern void prom_get_irq_senses(unsigned char *senses, int off, int max); -extern int prom_add_property(struct device_node* np, struct property* prop); - -#endif /* _PPC64_PROM_H */ diff --git a/include/asm-ppc64/serial.h b/include/asm-ppc64/serial.h deleted file mode 100644 index d6bcb79..0000000 --- a/include/asm-ppc64/serial.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * include/asm-ppc64/serial.h - */ -#ifndef _PPC64_SERIAL_H -#define _PPC64_SERIAL_H - -/* - * This assumes you have a 1.8432 MHz clock for your UART. - * - * It'd be nice if someone built a serial card with a 24.576 MHz - * clock, since the 16550A is capable of handling a top speed of 1.5 - * megabits/second; but this requires the faster clock. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -/* Default baud base if not found in device-tree */ -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* _PPC64_SERIAL_H */ diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h deleted file mode 100644 index bf9a6ab..0000000 --- a/include/asm-ppc64/system.h +++ /dev/null @@ -1,310 +0,0 @@ -#ifndef __PPC64_SYSTEM_H -#define __PPC64_SYSTEM_H - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include - -/* - * Memory barrier. - * The sync instruction guarantees that all memory accesses initiated - * by this processor have been performed (with respect to all other - * mechanisms that access memory). The eieio instruction is a barrier - * providing an ordering (separately) for (a) cacheable stores and (b) - * loads and stores to non-cacheable memory (e.g. I/O devices). - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - * read_barrier_depends() prevents data-dependent loads being reordered - * across this point (nop on PPC). - * - * We have to use the sync instructions for mb(), since lwsync doesn't - * order loads with respect to previous stores. Lwsync is fine for - * rmb(), though. - * For wmb(), we use sync since wmb is used in drivers to order - * stores to system memory with respect to writes to the device. - * However, smp_wmb() can be a lighter-weight eieio barrier on - * SMP since it is only used to order updates to system memory. 
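/*
 * Illustrative sketch, not part of the patch: the classic pairing the comment
 * above describes -- a producer publishes data and then a flag with smp_wmb(),
 * and a consumer that sees the flag orders its reads with smp_rmb().  The
 * sketch_* names are hypothetical; assumes this header is already included.
 */
static int sketch_data;
static int sketch_ready;

static void sketch_publish(void)
{
	sketch_data = 42;
	smp_wmb();		/* data store must be visible before the flag store */
	sketch_ready = 1;
}

static int sketch_consume(void)
{
	if (!sketch_ready)
		return -1;	/* nothing published yet */
	smp_rmb();		/* flag load must complete before the data load */
	return sketch_data;
}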
- */ -#define mb() __asm__ __volatile__ ("sync" : : : "memory") -#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") -#define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) -#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() eieio() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() __asm__ __volatile__("": : :"memory") -#define smp_rmb() __asm__ __volatile__("": : :"memory") -#define smp_wmb() __asm__ __volatile__("": : :"memory") -#define smp_read_barrier_depends() do { } while(0) -#endif /* CONFIG_SMP */ - -#ifdef __KERNEL__ -struct task_struct; -struct pt_regs; - -#ifdef CONFIG_DEBUGGER - -extern int (*__debugger)(struct pt_regs *regs); -extern int (*__debugger_ipi)(struct pt_regs *regs); -extern int (*__debugger_bpt)(struct pt_regs *regs); -extern int (*__debugger_sstep)(struct pt_regs *regs); -extern int (*__debugger_iabr_match)(struct pt_regs *regs); -extern int (*__debugger_dabr_match)(struct pt_regs *regs); -extern int (*__debugger_fault_handler)(struct pt_regs *regs); - -#define DEBUGGER_BOILERPLATE(__NAME) \ -static inline int __NAME(struct pt_regs *regs) \ -{ \ - if (unlikely(__ ## __NAME)) \ - return __ ## __NAME(regs); \ - return 0; \ -} - -DEBUGGER_BOILERPLATE(debugger) -DEBUGGER_BOILERPLATE(debugger_ipi) -DEBUGGER_BOILERPLATE(debugger_bpt) -DEBUGGER_BOILERPLATE(debugger_sstep) -DEBUGGER_BOILERPLATE(debugger_iabr_match) -DEBUGGER_BOILERPLATE(debugger_dabr_match) -DEBUGGER_BOILERPLATE(debugger_fault_handler) - -#ifdef CONFIG_XMON -extern void xmon_init(int enable); -#endif - -#else -static inline int debugger(struct pt_regs *regs) { return 0; } -static inline int debugger_ipi(struct pt_regs *regs) { return 0; } -static inline int debugger_bpt(struct pt_regs *regs) { return 0; } -static inline int debugger_sstep(struct pt_regs *regs) { return 0; } -static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } -#endif - -extern int set_dabr(unsigned long dabr); -extern void _exception(int signr, struct pt_regs *regs, int code, - unsigned long addr); -extern int fix_alignment(struct pt_regs *regs); -extern void bad_page_fault(struct pt_regs *regs, unsigned long address, - int sig); -extern void show_regs(struct pt_regs * regs); -extern void low_hash_fault(struct pt_regs *regs, unsigned long address); -extern int die(const char *str, struct pt_regs *regs, long err); - -extern int _get_PVR(void); -extern void giveup_fpu(struct task_struct *); -extern void disable_kernel_fp(void); -extern void flush_fp_to_thread(struct task_struct *); -extern void enable_kernel_fp(void); -extern void giveup_altivec(struct task_struct *); -extern void disable_kernel_altivec(void); -extern void enable_kernel_altivec(void); -extern int emulate_altivec(struct pt_regs *); -extern void cvt_fd(float *from, double *to, struct thread_struct *thread); -extern void cvt_df(double *from, float *to, struct thread_struct *thread); - -#ifdef CONFIG_ALTIVEC -extern void flush_altivec_to_thread(struct task_struct *); -#else -static inline void flush_altivec_to_thread(struct task_struct *t) -{ -} -#endif - -static inline void flush_spe_to_thread(struct task_struct *t) -{ -} - -extern int 
mem_init_done; /* set on boot once kmalloc can be called */ -extern unsigned long memory_limit; - -/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ -extern unsigned char e2a(unsigned char); - -extern struct task_struct *__switch_to(struct task_struct *, - struct task_struct *); -#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) - -struct thread_struct; -extern struct task_struct * _switch(struct thread_struct *prev, - struct thread_struct *next); - -extern unsigned long klimit; - -extern int powersave_nap; /* set if nap mode can be used in idle loop */ - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - * - * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64 - * is more like most of the other architectures. - */ -static __inline__ unsigned long -__xchg_u32(volatile unsigned int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - EIEIO_ON_SMP -"1: lwarx %0,0,%3 # __xchg_u32\n\ - stwcx. %2,0,%3\n\ -2: bne- 1b" - ISYNC_ON_SMP - : "=&r" (dummy), "=m" (*m) - : "r" (val), "r" (m) - : "cc", "memory"); - - return (dummy); -} - -static __inline__ unsigned long -__xchg_u64(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - EIEIO_ON_SMP -"1: ldarx %0,0,%3 # __xchg_u64\n\ - stdcx. %2,0,%3\n\ -2: bne- 1b" - ISYNC_ON_SMP - : "=&r" (dummy), "=m" (*m) - : "r" (val), "r" (m) - : "cc", "memory"); - - return (dummy); -} - -/* - * This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid xchg(). - */ -extern void __xchg_called_with_bad_pointer(void); - -static __inline__ unsigned long -__xchg(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32(ptr, x); - case 8: - return __xchg_u64(ptr, x); - } - __xchg_called_with_bad_pointer(); - return x; -} - -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -#define tas(ptr) (xchg((ptr),1)) - -#define __HAVE_ARCH_CMPXCHG 1 - -static __inline__ unsigned long -__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( - EIEIO_ON_SMP -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n\ - stwcx. %4,0,%2\n\ - bne- 1b" - ISYNC_ON_SMP - "\n\ -2:" - : "=&r" (prev), "=m" (*p) - : "r" (p), "r" (old), "r" (new), "m" (*p) - : "cc", "memory"); - - return prev; -} - -static __inline__ unsigned long -__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( - EIEIO_ON_SMP -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - ISYNC_ON_SMP - "\n\ -2:" - : "=&r" (prev), "=m" (*p) - : "r" (p), "r" (old), "r" (new), "m" (*p) - : "cc", "memory"); - - return prev; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). 
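/*
 * Illustrative sketch, not part of the patch: typical use of the cmpxchg()
 * defined below -- retry an "update only if unchanged" loop, here a saturating
 * counter increment.  The sketch_* names are hypothetical; assumes this header
 * is already included.
 */
static unsigned long sketch_count;

static void sketch_inc_saturating(unsigned long max)
{
	unsigned long old, new;

	do {
		old = sketch_count;
		if (old >= max)
			return;		/* already saturated, nothing to do */
		new = old + 1;
	} while (cmpxchg(&sketch_count, old, new) != old);
}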
*/ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __inline__ unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); - case 8: - return __cmpxchg_u64(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) - -/* - * We handle most unaligned accesses in hardware. On the other hand - * unaligned DMA can be very expensive on some ppc64 IO chips (it does - * powers of 2 writes until it reaches sufficient alignment). - * - * Based on this we disable the IP header alignment in network drivers. - */ -#define NET_IP_ALIGN 0 - -#define arch_align_stack(x) (x) - -extern unsigned long reloc_offset(void); - -#endif /* __KERNEL__ */ -#endif -- cgit v1.1 From 7568cb4ef6c507164b65b01f972a3bd026898ae1 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 14 Nov 2005 17:30:17 +1100 Subject: powerpc: Move most remaining ppc64 files over to arch/powerpc Also deletes files in arch/ppc64 that are no longer used now that we don't compile with ARCH=ppc64 any more. Signed-off-by: Paul Mackerras --- include/asm-ppc64/page.h | 328 ----------------------------------------------- 1 file changed, 328 deletions(-) delete mode 100644 include/asm-ppc64/page.h (limited to 'include') diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h deleted file mode 100644 index 3efc328..0000000 --- a/include/asm-ppc64/page.h +++ /dev/null @@ -1,328 +0,0 @@ -#ifndef _PPC64_PAGE_H -#define _PPC64_PAGE_H - -/* - * Copyright (C) 2001 PPC64 Team, IBM Corp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include - -/* - * We support either 4k or 64k software page size. When using 64k pages - * however, wether we are really supporting 64k pages in HW or not is - * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12 - * as use of 64k pages remains a linux kernel specific, every notion of - * page number shared with the firmware, TCEs, iommu, etc... still assumes - * a page size of 4096. 
- */ -#ifdef CONFIG_PPC_64K_PAGES -#define PAGE_SHIFT 16 -#else -#define PAGE_SHIFT 12 -#endif - -#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -/* HW_PAGE_SHIFT is always 4k pages */ -#define HW_PAGE_SHIFT 12 -#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT) -#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1)) - -/* PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and - * HW_PAGE_SHIFT, that is 4k pages - */ -#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT) - -/* Segment size */ -#define SID_SHIFT 28 -#define SID_MASK 0xfffffffffUL -#define ESID_MASK 0xfffffffff0000000UL -#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) - -/* Large pages size */ - -#ifndef __ASSEMBLY__ -extern unsigned int HPAGE_SHIFT; -#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#endif /* __ASSEMBLY__ */ - -#ifdef CONFIG_HUGETLB_PAGE - - -#define HTLB_AREA_SHIFT 40 -#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) -#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) - -#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ - - (1U << GET_ESID(addr))) & 0xffff) -#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ - - (1U << GET_HTLB_AREA(addr))) & 0xffff) - -#define ARCH_HAS_HUGEPAGE_ONLY_RANGE -#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE -#define ARCH_HAS_SETCLEAR_HUGE_PTE - -#define touches_hugepage_low_range(mm, addr, len) \ - (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas) -#define touches_hugepage_high_range(mm, addr, len) \ - (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas) - -#define __within_hugepage_low_range(addr, len, segmask) \ - ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)) -#define within_hugepage_low_range(addr, len) \ - __within_hugepage_low_range((addr), (len), \ - current->mm->context.low_htlb_areas) -#define __within_hugepage_high_range(addr, len, zonemask) \ - ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)) -#define within_hugepage_high_range(addr, len) \ - __within_hugepage_high_range((addr), (len), \ - current->mm->context.high_htlb_areas) - -#define is_hugepage_only_range(mm, addr, len) \ - (touches_hugepage_high_range((mm), (addr), (len)) || \ - touches_hugepage_low_range((mm), (addr), (len))) -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA - -#define in_hugepage_area(context, addr) \ - (cpu_has_feature(CPU_FTR_16M_PAGE) && \ - ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \ - ( ((addr) < 0x100000000L) && \ - ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) ) - -#else /* !CONFIG_HUGETLB_PAGE */ - -#define in_hugepage_area(mm, addr) 0 - -#endif /* !CONFIG_HUGETLB_PAGE */ - -/* align addr on a size boundary - adjust address up/down if needed */ -#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1))) -#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1))) - -/* align addr on a size boundary - adjust address up if needed */ -#define _ALIGN(addr,size) _ALIGN_UP(addr,size) - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ -#include - -#undef STRICT_MM_TYPECHECKS - -#define REGION_SIZE 4UL -#define REGION_SHIFT 60UL -#define REGION_MASK (((1UL<> REGION_SHIFT) -#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT) -#define USER_REGION_ID (0UL) -#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) - -#define __va(x) ((void *)((unsigned long)(x) + 
KERNELBASE)) - -#ifdef CONFIG_FLATMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) -#define pfn_valid(pfn) ((pfn) < max_mapnr) -#endif - -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) - -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) - -/* - * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, - * and needs to be executable. This means the whole heap ends - * up being executable. - */ -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_DATA_DEFAULT_FLAGS \ - (test_thread_flag(TIF_32BIT) ? \ - VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) - -/* - * This is the default if a program doesn't have a PT_GNU_STACK - * program header entry. The PPC64 ELF ABI has a non executable stack - * stack by default, so in the absense of a PT_GNU_STACK program header - * we turn execute permission off. - */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_STACK_DEFAULT_FLAGS \ - (test_thread_flag(TIF_32BIT) ? \ - VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) - -#endif /* __KERNEL__ */ - -#include - -#endif /* _PPC64_PAGE_H */ -- cgit v1.1 From 493f25ef4087395891c99fcfe2c72e62e293e89f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 14 Nov 2005 17:32:50 +1100 Subject: powerpc: Fix 32-bit compile: PPC_MEMSTART was undeclared This defines PPC_MEMSTART as 0 because it is still used in a couple of places in the 32-bit code. Signed-off-by: Paul Mackerras --- include/asm-powerpc/page_32.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h index 3522130..7259cfd 100644 --- a/include/asm-powerpc/page_32.h +++ b/include/asm-powerpc/page_32.h @@ -3,6 +3,8 @@ #define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32 +#define PPC_MEMSTART 0 + #ifndef __ASSEMBLY__ /* * The basic type of a PTE - 64 bits for those CPUs with > 32 bit -- cgit v1.1 From 2c13b7cee045af689b36349c2bc6a9ed6e3d73fa Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Mon, 14 Nov 2005 14:14:16 -0500 Subject: [libata] minor fixes, new helpers - in ata_dev_identify(), don't assume that all devices are either ATA or ATAPI. In the future, this code will see port multipliers and other devices. - make a debugging printk less verbose - add new helper ata_qc_reinit() - add new helper BPRINTK() and port flag ATA_FLAG_DEBUGMSG, for fine-grained debugging use. --- include/linux/libata.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include') diff --git a/include/linux/libata.h b/include/linux/libata.h index ad59961..f2dbb68 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -59,6 +59,8 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ +#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) + #ifdef ATA_NDEBUG #define assert(expr) #else @@ -119,6 +121,7 @@ enum { ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once * proper HSM is in place. 
*/ + ATA_FLAG_DEBUGMSG = (1 << 10), ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ @@ -659,6 +662,17 @@ static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, uns tf->device = ATA_DEVICE_OBS | ATA_DEV1; } +static inline void ata_qc_reinit(struct ata_queued_cmd *qc) +{ + qc->__sg = NULL; + qc->flags = 0; + qc->cursect = qc->cursg = qc->cursg_ofs = 0; + qc->nsect = 0; + qc->nbytes = qc->curbytes = 0; + + ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); +} + /** * ata_irq_on - Enable interrupts on a port. -- cgit v1.1 From c0400c4f5a08cfd1c657f7f616fcf1dfbd76a4d7 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 14 Nov 2005 15:21:41 -0800 Subject: [NETFILTER] nfnetlink: skip size check if size not specified (== 0) Skip sizecheck if the size of the attribute wasn't specified, ie. zero. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter/nfnetlink.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 72975fa..8be2f84 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -154,11 +154,14 @@ extern void nfattr_parse(struct nfattr *tb[], int maxattr, #define nfattr_bad_size(tb, max, cta_min) \ ({ int __i, __res = 0; \ - for (__i=0; __i Date: Mon, 14 Nov 2005 15:24:59 -0800 Subject: [NETFILTER] nfnetlink: unconditionally require CAP_NET_ADMIN This patch unconditionally requires CAP_NET_ADMIN for all nfnetlink messages. It also removes the per-message cap_required field, since all existing subsystems use CAP_NET_ADMIN for all their messages anyway. Patrick McHardy owes me a beer if we ever need to re-introduce this. Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter/nfnetlink.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 8be2f84..934a247 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -112,7 +112,6 @@ struct nfnl_callback { int (*call)(struct sock *nl, struct sk_buff *skb, struct nlmsghdr *nlh, struct nfattr *cda[], int *errp); - kernel_cap_t cap_required; /* capabilities required for this msg */ u_int16_t attr_count; /* number of nfattr's */ }; -- cgit v1.1 From a272e24cc8751d125f9582befed0213a2a2b270f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 14 Nov 2005 21:55:48 +1100 Subject: powerpc: Remove an extraneous and incorrect declaration of pmac_nvram_init. Signed-off-by: Paul Mackerras --- include/asm-powerpc/nvram.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/asm-powerpc/nvram.h b/include/asm-powerpc/nvram.h index 1858244..24bd8c2 100644 --- a/include/asm-powerpc/nvram.h +++ b/include/asm-powerpc/nvram.h @@ -68,7 +68,6 @@ extern int nvram_clear_error_log(void); extern struct nvram_partition *nvram_find_partition(int sig, const char *name); extern int pSeries_nvram_init(void); -extern int pmac_nvram_init(void); extern int mmio_nvram_init(void); /* PowerMac specific nvram stuffs */ -- cgit v1.1 From a2f1b424900715ed9d1699c3bb88a434a2b42bc0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Add 4GB DMA32 zone Add a new 4GB GFP_DMA32 zone between the GFP_DMA and GFP_NORMAL zones. 
As a bit of historical background: when the x86-64 port was originally designed we had some discussion about whether we should use a 16MB DMA zone like i386, a 4GB DMA zone like IA64, or both. Having both was ruled out at that point because it was in early 2.4 when the VM was still quite shaky and had bad trouble even dealing with one DMA zone. We settled on the 16MB DMA zone mainly because we worried about older soundcards and the floppy. But this has always caused problems since then because device drivers had trouble getting enough DMA-able memory. These days the VM works much better and the wide use of NUMA has proven it can deal with many zones successfully. So this patch adds both zones. This helps drivers that need a lot of memory below 4GB because their hardware cannot address more (graphics drivers - proprietary and free ones, video frame buffer drivers, sound drivers etc.). Previously they could only use IOMMU+16MB GFP_DMA, which was not enough memory. Another common problem is hardware that has full memory addressing for >4GB but misses it for some control structures in memory (like transmit rings or other metadata). Such drivers tended to allocate memory in the 16MB GFP_DMA zone or the IOMMU/swiotlb using pci_alloc_consistent, but that can tie up a lot of precious 16MB GFP_DMA/IOMMU/swiotlb memory (even on AMD systems the IOMMU tends to be quite small), especially if you have many devices. With the new zone pci_alloc_consistent can just put this stuff into memory below 4GB, which works better. One remaining argument was whether the zone should be 4GB or 2GB. The main motivation for 2GB would be an unnamed, not so unpopular hardware RAID controller (mostly found in older machines from a particular four letter company) which has a strange 2GB restriction in firmware. But that one works ok with swiotlb/IOMMU anyway, so it doesn't really need GFP_DMA32. I chose 4GB to be compatible with IA64 and because it seems to be the most common restriction. The new zone is so far added only for x86-64. For other architectures that don't set up this new zone nothing changes. Architectures can set a compatibility define in Kconfig, CONFIG_DMA_IS_DMA32, that will define GFP_DMA32 as GFP_DMA. Otherwise it's a nop because on 32-bit architectures it's normally not needed: GFP_NORMAL (=0) is DMA-able enough. One problem that remains is that GFP_DMA means different things on different architectures, e.g. some drivers used to have #ifdef ia64 use GFP_DMA (trusting it to be 4GB) #elif __x86_64__ (use other hacks like the swiotlb because 16MB is not enough) ... . This was quite ugly and is now obsolete. These should now be converted to use GFP_DMA32 unconditionally. I haven't done this yet. Better still, use only pci_alloc_consistent/dma_alloc_coherent, which will use GFP_DMA32 transparently.
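A minimal kernel-style sketch of the allocation pattern the last paragraph recommends. The driver, function names and buffer sizes below are hypothetical; only GFP_DMA32/GFP_KERNEL, __get_free_pages() and dma_alloc_coherent() are the real interfaces under discussion, and this is an illustration rather than code from the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical device that can bus-master only below 4GB. */
static int my_dev_alloc_ring(struct device *dev, void **ring, dma_addr_t *ring_dma)
{
	/*
	 * Preferred path: let the DMA API place the buffer; with the new
	 * zone it can fall back to GFP_DMA32 transparently.
	 */
	*ring = dma_alloc_coherent(dev, 2 * PAGE_SIZE, ring_dma, GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}

/*
 * Open-coded alternative: ask for the 4GB zone explicitly instead of
 * falling back to the tiny 16MB GFP_DMA zone or the IOMMU/swiotlb
 * (the pages still have to be mapped for DMA afterwards).
 */
static unsigned long my_dev_alloc_low_pages(unsigned int order)
{
	return __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
}

On architectures without the new zone, GFP_DMA32 degrades to GFP_DMA or to 0 exactly as the compatibility define described above, so the same code stays portable.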
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/dma.h | 11 +++++++++-- include/asm-x86_64/proto.h | 2 ++ include/linux/gfp.h | 11 +++++++++++ include/linux/mmzone.h | 16 +++++++++------- 4 files changed, 31 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h index 16fa3a0..6f2a817 100644 --- a/include/asm-x86_64/dma.h +++ b/include/asm-x86_64/dma.h @@ -72,8 +72,15 @@ #define MAX_DMA_CHANNELS 8 -/* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) + +/* 16MB ISA DMA zone */ +#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) + +/* 4GB broken PCI/AGP hardware bus master zone */ +#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) + +/* Compat define for old dma zone */ +#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) /* 8237 DMA controllers */ #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index dbb37b0..c251152 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -22,6 +22,8 @@ extern void mtrr_bp_init(void); #define mtrr_bp_init() do {} while (0) #endif extern void init_memory_mapping(unsigned long start, unsigned long end); +extern void size_zones(unsigned long *z, unsigned long *h, + unsigned long start_pfn, unsigned long end_pfn); extern void system_call(void); extern int kernel_syscall(void); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c377943..4351e6b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -14,6 +14,13 @@ struct vm_area_struct; /* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */ #define __GFP_DMA ((__force gfp_t)0x01u) #define __GFP_HIGHMEM ((__force gfp_t)0x02u) +#ifdef CONFIG_DMA_IS_DMA32 +#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */ +#elif BITS_PER_LONG < 64 +#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */ +#else +#define __GFP_DMA32 ((__force gfp_t)0x04) /* Has own ZONE_DMA32 */ +#endif /* * Action modifiers - doesn't change the zoning @@ -64,6 +71,10 @@ struct vm_area_struct; #define GFP_DMA __GFP_DMA +/* 4GB DMA on some platforms */ +#define GFP_DMA32 __GFP_DMA32 + + #define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK)) /* diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f5fa308..da7a829 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -71,10 +71,11 @@ struct per_cpu_pageset { #endif #define ZONE_DMA 0 -#define ZONE_NORMAL 1 -#define ZONE_HIGHMEM 2 +#define ZONE_DMA32 1 +#define ZONE_NORMAL 2 +#define ZONE_HIGHMEM 3 -#define MAX_NR_ZONES 3 /* Sync this with ZONES_SHIFT */ +#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */ #define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */ @@ -108,9 +109,10 @@ struct per_cpu_pageset { /* * On machines where it is needed (eg PCs) we divide physical memory - * into multiple physical zones. On a PC we have 3 zones: + * into multiple physical zones. On a PC we have 4 zones: * * ZONE_DMA < 16 MB ISA DMA capable memory + * ZONE_DMA32 0 MB Empty * ZONE_NORMAL 16-896 MB direct mapped by the kernel * ZONE_HIGHMEM > 896 MB only page cache and user processes */ @@ -455,10 +457,10 @@ extern struct pglist_data contig_page_data; #if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) /* - * with 32 bit page->flags field, we reserve 8 bits for node/zone info. 
- * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes. + * with 32 bit page->flags field, we reserve 9 bits for node/zone info. + * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes. */ -#define FLAGS_RESERVED 8 +#define FLAGS_RESERVED 9 #elif BITS_PER_LONG == 64 /* -- cgit v1.1 From 979edfadbae2286eec5b46143c00e81bca96498e Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Adjust, correct, and complete the HPET definitions for x86-64. Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/hpet.h | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h index a3877f5..c20c28f 100644 --- a/include/asm-x86_64/hpet.h +++ b/include/asm-x86_64/hpet.h @@ -14,18 +14,18 @@ #define HPET_CFG 0x010 #define HPET_STATUS 0x020 #define HPET_COUNTER 0x0f0 -#define HPET_T0_CFG 0x100 -#define HPET_T0_CMP 0x108 -#define HPET_T0_ROUTE 0x110 -#define HPET_T1_CFG 0x120 -#define HPET_T1_CMP 0x128 -#define HPET_T1_ROUTE 0x130 -#define HPET_T2_CFG 0x140 -#define HPET_T2_CMP 0x148 -#define HPET_T2_ROUTE 0x150 +#define HPET_Tn_OFFSET 0x20 +#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET) +#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET) +#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET) +#define HPET_T0_CFG HPET_Tn_CFG(0) +#define HPET_T0_CMP HPET_Tn_CMP(0) +#define HPET_T1_CFG HPET_Tn_CFG(1) +#define HPET_T1_CMP HPET_Tn_CMP(1) #define HPET_ID_VENDOR 0xffff0000 #define HPET_ID_LEGSUP 0x00008000 +#define HPET_ID_64BIT 0x00002000 #define HPET_ID_NUMBER 0x00001f00 #define HPET_ID_REV 0x000000ff #define HPET_ID_NUMBER_SHIFT 8 @@ -38,11 +38,18 @@ #define HPET_LEGACY_8254 2 #define HPET_LEGACY_RTC 8 -#define HPET_TN_ENABLE 0x004 -#define HPET_TN_PERIODIC 0x008 -#define HPET_TN_PERIODIC_CAP 0x010 -#define HPET_TN_SETVAL 0x040 -#define HPET_TN_32BIT 0x100 +#define HPET_TN_LEVEL 0x0002 +#define HPET_TN_ENABLE 0x0004 +#define HPET_TN_PERIODIC 0x0008 +#define HPET_TN_PERIODIC_CAP 0x0010 +#define HPET_TN_64BIT_CAP 0x0020 +#define HPET_TN_SETVAL 0x0040 +#define HPET_TN_32BIT 0x0100 +#define HPET_TN_ROUTE 0x3e00 +#define HPET_TN_FSB 0x4000 +#define HPET_TN_FSB_CAP 0x8000 + +#define HPET_TN_ROUTE_SHIFT 9 extern int is_hpet_enabled(void); extern int hpet_rtc_timer_init(void); -- cgit v1.1 From 89b831ef8bf5cfbb357dbc0a2e07700d7f20eec5 Mon Sep 17 00:00:00 2001 From: Jacob Shin Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Support for AMD specific MCE Threshold. MC4_MISC - DRAM Errors Threshold Register realized under AMD K8 Rev F. This register is used to count correctable and uncorrectable ECC errors that occur during DRAM read operations. The user may interface through sysfs files in order to change the threshold configuration. bank%d/error_count - reads current error count, write to clear. bank%d/interrupt_enable - set/clear interrupt enable. bank%d/threshold_limit - read/write the threshold limit. APIC vector 0xF9 in hw_irq.h. 5 software defined bank ids in mce.h. new apic.c function to setup threshold apic lvt. defaults to interrupt off, count enabled, and threshold limit max. sysfs interface created on /sys/devices/system/threshold. 
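The sysfs files named above lend themselves to a small user-space sketch. The bank4 directory under /sys/devices/system/threshold is an assumption pieced together from the bank%d names in this description (DRAM ECC corresponds to MC bank 4), not a path taken from the driver itself:

#include <stdio.h>

/* Assumed layout, built from the bank%d file names in the commit message. */
#define THRESHOLD_DIR "/sys/devices/system/threshold/bank4"

int main(void)
{
	unsigned long count, limit = 100;
	FILE *f;

	f = fopen(THRESHOLD_DIR "/error_count", "r");
	if (f) {
		if (fscanf(f, "%lu", &count) == 1)
			printf("correctable DRAM ECC errors so far: %lu\n", count);
		fclose(f);
	}

	/* Raise an interrupt once 'limit' errors have been counted. */
	f = fopen(THRESHOLD_DIR "/threshold_limit", "w");
	if (f) {
		fprintf(f, "%lu\n", limit);
		fclose(f);
	}
	f = fopen(THRESHOLD_DIR "/interrupt_enable", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}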
AK: added some ifdefs to make it compile on UP Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/apic.h | 2 ++ include/asm-x86_64/hw_irq.h | 2 +- include/asm-x86_64/mce.h | 10 ++++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index 6c5d5ca..5647b7d 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -111,6 +111,8 @@ extern unsigned int nmi_watchdog; extern int disable_timer_pin_1; +extern void setup_threshold_lvt(unsigned long lvt_off); + #endif /* CONFIG_X86_LOCAL_APIC */ extern unsigned boot_cpu_id; diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index dc97668..c14a8c7 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h @@ -55,7 +55,7 @@ struct hw_interrupt_type; #define CALL_FUNCTION_VECTOR 0xfc #define KDB_VECTOR 0xfb /* reserved for KDB */ #define THERMAL_APIC_VECTOR 0xfa -/* 0xf9 free */ +#define THRESHOLD_APIC_VECTOR 0xf9 #define INVALIDATE_TLB_VECTOR_END 0xf8 #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */ diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h index 869249d..5d298b7 100644 --- a/include/asm-x86_64/mce.h +++ b/include/asm-x86_64/mce.h @@ -67,6 +67,8 @@ struct mce_log { /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 +#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ +#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 void mce_log(struct mce *m); #ifdef CONFIG_X86_MCE_INTEL @@ -77,4 +79,12 @@ static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) } #endif +#ifdef CONFIG_X86_MCE_AMD +void mce_amd_feature_init(struct cpuinfo_x86 *c); +#else +static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) +{ +} +#endif + #endif -- cgit v1.1 From 6004e1b7effcbb385a6b7c790e4b8008682cf679 Mon Sep 17 00:00:00 2001 From: James Cleverdon Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] i386/x86-64: Share interrupt vectors when there is a large number of interrupt sources Here's a patch that builds on Natalie Protasevich's IRQ compression patch and tries to work for MPS boots as well as ACPI. It is meant for a 4-node IBM x460 NUMA box, which was dying because it had interrupt pins with GSI numbers > NR_IRQS and thus overflowed irq_desc. The problem is that this system has 270 GSIs (which are 1:1 mapped with I/O APIC RTEs) and an 8-node box would have 540. This is much bigger than NR_IRQS (224 for both i386 and x86_64). Also, there aren't enough vectors to go around. There are about 190 usable vectors, not counting the reserved ones and the unused vectors at 0x20 to 0x2F. So, my patch attempts to compress the GSI range and share vectors by sharing IRQs. 
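The numbers above (270 GSIs against an NR_IRQS of 224 and roughly 190 usable vectors) are what force the sharing. The following self-contained toy model only illustrates the idea of compressing a sparse, oversized GSI space onto the smaller IRQ space and sharing entries once it fills up; it is not the kernel's implementation:

#include <stdio.h>

#define NR_IRQS   224	/* value cited in the commit message */
#define NR_GSIS   270	/* the 4-node x460 case */

/* Toy model: 0 means "not mapped yet"; otherwise stores irq + 1. */
static int gsi_to_irq[NR_GSIS];
static int next_irq;

static int irq_for_gsi(int gsi)
{
	if (gsi_to_irq[gsi])
		return gsi_to_irq[gsi] - 1;		/* already mapped */
	if (next_irq < NR_IRQS)
		gsi_to_irq[gsi] = ++next_irq;		/* hand out a fresh IRQ */
	else
		gsi_to_irq[gsi] = (gsi % NR_IRQS) + 1;	/* share an existing one */
	return gsi_to_irq[gsi] - 1;
}

int main(void)
{
	int gsi, shared = 0;

	for (gsi = 0; gsi < NR_GSIS; gsi++)
		if (irq_for_gsi(gsi) != gsi)
			shared++;
	printf("%d GSIs mapped onto %d IRQs, %d of them shared\n",
	       NR_GSIS, next_irq, shared);
	return 0;
}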
Cc: "Protasevich, Natalie" Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/desc.h | 3 +++ include/asm-x86_64/mpspec.h | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h index 68ac3c6..1a3d380 100644 --- a/include/asm-x86_64/desc.h +++ b/include/asm-x86_64/desc.h @@ -98,16 +98,19 @@ static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsig static inline void set_intr_gate(int nr, void *func) { + BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); } static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) { + BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); } static inline void set_system_gate(int nr, void *func) { + BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); } diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h index f267e10..6b37538 100644 --- a/include/asm-x86_64/mpspec.h +++ b/include/asm-x86_64/mpspec.h @@ -157,7 +157,8 @@ struct mpc_config_lintsrc */ #define MAX_MP_BUSSES 256 -#define MAX_IRQ_SOURCES 256 +/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ +#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) enum mp_bustype { MP_BUS_ISA = 1, MP_BUS_EISA, -- cgit v1.1 From 1dff7f3db5f045ccbfeca5bb00b0958a78501557 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Fix up outdated pfn_to_page comment pfn_to_page really requires pfn_valid to be true now, no question. Some people stumbled over it, but it was misleading and wrong. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/mmzone.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index b40c661..8d48dc7 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h @@ -41,9 +41,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) -/* AK: this currently doesn't deal with invalid addresses. We'll see - if the 2.5 kernel doesn't pass them - (2.4 used to). */ +/* Requires pfn_valid(pfn) to be true */ #define pfn_to_page(pfn) ({ \ int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \ ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \ -- cgit v1.1 From 07808b74e7dab1aa385e698795875337d72daf7d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Remove obsolete ARCH_HAS_ATOMIC_UNSIGNED and page_flags_t Has been introduced for x86-64 at some point to save memory in struct page, but has been obsolete for some time. Just remove it. 
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/linux/mm.h | 10 ++-------- include/linux/mmzone.h | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 5c1fb0a..23fad4d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -206,12 +206,6 @@ struct vm_operations_struct { struct mmu_gather; struct inode; -#ifdef ARCH_HAS_ATOMIC_UNSIGNED -typedef unsigned page_flags_t; -#else -typedef unsigned long page_flags_t; -#endif - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -219,7 +213,7 @@ typedef unsigned long page_flags_t; * a page. */ struct page { - page_flags_t flags; /* Atomic flags, some possibly + unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ atomic_t _count; /* Usage count, see below. */ atomic_t _mapcount; /* Count of ptes mapped in mms, @@ -435,7 +429,7 @@ static inline void put_page(struct page *page) #endif /* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ -#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH) +#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index da7a829..57fc99c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -455,7 +455,7 @@ extern struct pglist_data contig_page_data; #include #endif -#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) +#if BITS_PER_LONG == 32 /* * with 32 bit page->flags field, we reserve 9 bits for node/zone info. * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes. -- cgit v1.1 From 69d81fcde7797342417591ba7affb372b9c86eae Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Speed up numa_node_id by putting it directly into the PDA This avoids going from the CPU number to a mapping array. The node number is often used now in fast paths. This also adds a generic numa_node_id to all the topology includes. Suggested by Eric Dumazet Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/numa.h | 2 ++ include/asm-x86_64/pda.h | 1 + include/asm-x86_64/topology.h | 2 ++ include/linux/mmzone.h | 2 ++ 4 files changed, 7 insertions(+) (limited to 'include') diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h index bcf55c3..d51e56f 100644 --- a/include/asm-x86_64/numa.h +++ b/include/asm-x86_64/numa.h @@ -17,6 +17,8 @@ extern void numa_add_cpu(int cpu); extern void numa_init_array(void); extern int numa_off; +extern void numa_set_node(int cpu, int node); + extern unsigned char apicid_to_node[256]; #define NUMA_NO_NODE 0xff diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h index bbf89aa..8733ccf 100644 --- a/include/asm-x86_64/pda.h +++ b/include/asm-x86_64/pda.h @@ -15,6 +15,7 @@ struct x8664_pda { int irqcount; /* Irq nesting counter. 
Starts with -1 */ int cpunumber; /* Logical CPU number */ char *irqstackptr; /* top of irqstack */ + int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ struct mm_struct *active_mm; diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 1c603cd..d39ebd5 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -28,6 +28,8 @@ extern int __node_distance(int, int); #define pcibus_to_node(bus) ((long)(bus->sysdata)) #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); +#define numa_node_id() read_pda(nodenumber) + /* sched_domains SD_NODE_INIT for x86_64 machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 57fc99c..f3cffc3 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -435,7 +435,9 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, #include /* Returns the number of the current Node. */ +#ifndef numa_node_id #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) +#endif #ifndef CONFIG_NEED_MULTIPLE_NODES -- cgit v1.1 From f6c2e3330d3fdd5474bc3756da46fca889a30e33 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Unmap NULL during early bootup We should zap the low mappings as soon as possible, so that we can catch kernel bugs more effectively. Previously early boot had NULL mapped and didn't trap on NULL references. This patch introduces boot_level4_pgt, which will always have the low identity addresses mapped. During boot, all the processors will use this as their level4 pgt. On the BP, we will switch to init_level4_pgt as soon as we enter C code and zap the low mappings as soon as we are done using the identity-mapped low addresses. On APs we will zap the low mappings as soon as we jump to C code.
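A toy user-space model of what the message describes - why leaving the low identity mapping in place hides NULL dereferences, and why switching to a table without it makes them fault. The PGD-index arithmetic (bits 39-47 of the address) is real x86-64 4-level paging; the two arrays are only stand-ins for boot_level4_pgt/init_level4_pgt, not the actual boot code:

#include <stdio.h>

#define PTRS_PER_PGD 512

/* Toy model: a non-zero entry simply marks "this top-level slot is mapped". */
static unsigned long boot_level4_pgt[PTRS_PER_PGD];
static unsigned long init_level4_pgt[PTRS_PER_PGD];

static int pgd_index(unsigned long addr)
{
	return (addr >> 39) & (PTRS_PER_PGD - 1);	/* bits 39-47 */
}

static int mapped(const unsigned long *pgd, unsigned long addr)
{
	return pgd[pgd_index(addr)] != 0;
}

int main(void)
{
	unsigned long kernel_text = 0xffffffff80100000UL;

	/* Boot table: low identity mapping plus the kernel mapping. */
	boot_level4_pgt[pgd_index(0)] = 1;
	boot_level4_pgt[pgd_index(kernel_text)] = 1;
	/* Final table: kernel mapping only; the low slot stays empty (zapped). */
	init_level4_pgt[pgd_index(kernel_text)] = 1;

	printf("NULL deref faults during early boot: %s\n",
	       mapped(boot_level4_pgt, 0) ? "no" : "yes");
	printf("NULL deref faults after the switch:  %s\n",
	       mapped(init_level4_pgt, 0) ? "no" : "yes");
	return 0;
}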
Signed-off-by: Suresh Siddha Signed-off-by: Ashok Raj Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/pgtable.h | 1 + include/asm-x86_64/proto.h | 2 ++ include/asm-x86_64/smp.h | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 7a07196..a204efb 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -16,6 +16,7 @@ extern pud_t level3_physmem_pgt[512]; extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pgd_t init_level4_pgt[]; +extern pgd_t boot_level4_pgt[]; extern unsigned long __supported_pte_mask; #define swapper_pg_dir init_level4_pgt diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index c251152..3450108 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h @@ -11,6 +11,8 @@ struct pt_regs; extern void start_kernel(void); extern void pda_init(int); +extern void zap_low_mappings(int cpu); + extern void early_idt_handler(void); extern void mcheck_init(struct cpuinfo_x86 *c); diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index c57ce40..592161e 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -47,7 +47,6 @@ extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); extern int smp_num_siblings; extern void smp_send_reschedule(int cpu); -extern void zap_low_mappings(void); void smp_stop_cpu(void); extern int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, int retry, int wait); -- cgit v1.1 From 6b75aeedde1e8a8513393d3c1367bf81bc5b0c67 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Don't apply __PHYSICAL_MASK to page frame numbers It is for physical addresses, not for PFNs. Pointed out by Tejun Heo. Cc: htejun@gmail.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/page.h | 2 +- include/asm-x86_64/pgtable.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index e5ab4d2..06e489f 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -11,7 +11,7 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #endif #define PAGE_MASK (~(PAGE_SIZE-1)) -#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT)) +#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) #define THREAD_ORDER 1 #ifdef __ASSEMBLY__ diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index a204efb..f8e87a5 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -246,7 +246,7 @@ static inline unsigned long pud_bad(pud_t pud) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? 
*/ #define pte_page(x) pfn_to_page(pte_pfn(x)) -#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK) +#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { @@ -353,7 +353,7 @@ static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE ) #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) -#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK) +#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) -- cgit v1.1 From 2bc0414ee04fd8bb798760801f5d7476dff44241 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:53 +0100 Subject: [PATCH] x86_64: Only use asm/sections.h to declare section symbols Adding __initdata_* to asm-generic/sections.h Replaces a lot of open coded externs in arch/x86_64/* I had to change __bss_end to __bss_stop to match the other architectures. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-generic/sections.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 886dbd1..0b49f9e 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -13,5 +13,6 @@ extern char _eextratext[] __attribute__((weak)); extern char _end[]; extern char __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; +extern char __initdata_begin[], __initdata_end[]; #endif /* _ASM_GENERIC_SECTIONS_H_ */ -- cgit v1.1 From 485832a5d928facd82f1525270d9f048da2063a1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Use int operations in spinlocks to support more than 128 CPUs spinning. Pointed out by Eric Dumazet Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/spinlock.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 6963683..fe484a6 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -18,22 +18,22 @@ */ #define __raw_spin_is_locked(x) \ - (*(volatile signed char *)(&(x)->slock) <= 0) + (*(volatile signed int *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ - "lock ; decb %0\n\t" \ + "lock ; decl %0\n\t" \ "js 2f\n" \ LOCK_SECTION_START("") \ "2:\t" \ "rep;nop\n\t" \ - "cmpb $0,%0\n\t" \ + "cmpl $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END #define __raw_spin_unlock_string \ - "movb $1,%0" \ + "movl $1,%0" \ :"=m" (lock->slock) : : "memory" static inline void __raw_spin_lock(raw_spinlock_t *lock) @@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) static inline int __raw_spin_trylock(raw_spinlock_t *lock) { - char oldval; + int oldval; __asm__ __volatile__( - "xchgb %b0,%1" + "xchgl %0,%1" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); -- cgit v1.1 From 420f8f68c9c5148dddf946bebdbc7eacde2172cb Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: New heuristics to find out hotpluggable CPUs. 
With a NR_CPUS==128 kernel with CPU hotplug enabled we would waste 4MB on per-CPU data of all possible CPUs. The reason was that HOTPLUG always set up the possible map to NR_CPUS CPUs and then we need to allocate that much (each per-CPU data area is roughly ~32k now). The underlying problem is that ACPI didn't tell us how many hotplug CPUs the platform supports. So the old code just assumed all, which would lead to this memory wastage. This implements some new heuristics: - If the BIOS specified disabled CPUs in the ACPI/mptables, assume they can be enabled later (this is bending the ACPI specification a bit, but seems like an obvious extension). - The user can override it with a new additionals_cpus=NUM option. - Otherwise use half of the available CPUs or 2, whichever is more. Cc: ashok.raj@intel.com Cc: len.brown@intel.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/smp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 592161e..cf8f969 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -81,6 +81,8 @@ extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); +extern unsigned num_processors; +extern unsigned disabled_cpus; #endif /* !ASSEMBLY */ -- cgit v1.1 From ea0be473a1f0ee89024a24d8ea4b05fbf6efcee3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Allow modular build of ia32 aout loader Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/ia32.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h index 6efa00f..c7bc9c0 100644 --- a/include/asm-x86_64/ia32.h +++ b/include/asm-x86_64/ia32.h @@ -165,6 +165,11 @@ struct siginfo_t; int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info); int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info); int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs); + +struct linux_binprm; +extern int ia32_setup_arg_pages(struct linux_binprm *bprm, + unsigned long stack_top, int exec_stack); + #endif #endif /* !CONFIG_IA32_SUPPORT */ -- cgit v1.1 From a88cde13bae3fffd6ecc812bdd02c91eafb6073e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Formatting fixes for arch/x86_64/kernel/process.c No functional changes.
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/msr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 5a7fe3c..24dc396 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -19,7 +19,7 @@ : "=a" (a__), "=d" (b__) \ : "c" (msr)); \ val = a__ | (b__<<32); \ -} while(0); +} while(0) #define wrmsr(msr,val1,val2) \ __asm__ __volatile__("wrmsr" \ -- cgit v1.1 From e90f22edf432512219cc2952f5811961abbd164f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Fix NUMA node lookup debug code which had bitrotted Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/mmzone.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index 8d48dc7..69baaa8 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h @@ -17,16 +17,15 @@ /* Simple perfect hash to map physical addresses to node numbers */ extern int memnode_shift; extern u8 memnodemap[NODEMAPSIZE]; -extern int maxnode; extern struct pglist_data *node_data[]; static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) { - int nid; + unsigned nid; VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE); nid = memnodemap[addr >> memnode_shift]; - VIRTUAL_BUG_ON(nid > maxnode); + VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); return nid; } -- cgit v1.1 From 94605eff572b727aaad9b4b29bc358b919096503 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86-64/i386: Intel HT, Multi core detection fixes Fields obtained through cpuid vector 0x1(ebx[16:23]) and vector 0x4(eax[14:25], eax[26:31]) indicate the maximum values and might not always be the same as what is available and what OS sees. So make sure "siblings" and "cpu cores" values in /proc/cpuinfo reflect the values as seen by OS instead of what cpuid instruction says. This will also fix the buggy BIOS cases (for example where cpuid on a single core cpu says there are "2" siblings, even when HT is disabled in the BIOS. 
http://bugzilla.kernel.org/show_bug.cgi?id=4359) Signed-off-by: Suresh Siddha Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-i386/processor.h | 4 +++- include/asm-x86_64/processor.h | 4 +++- include/linux/bitops.h | 10 ++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 0a4ec76..9cd4a05 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -65,7 +65,9 @@ struct cpuinfo_x86 { int f00f_bug; int coma_bug; unsigned long loops_per_jiffy; - unsigned char x86_num_cores; + unsigned char x86_max_cores; /* cpuid returned max cores value */ + unsigned char booted_cores; /* number of cores as seen by OS */ + unsigned char apicid; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 03837d3..4861246 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -61,10 +61,12 @@ struct cpuinfo_x86 { int x86_cache_alignment; int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ __u8 x86_virt_bits, x86_phys_bits; - __u8 x86_num_cores; + __u8 x86_max_cores; /* cpuid returned max cores value */ __u32 x86_power; __u32 extended_cpuid_level; /* Max extended CPUID function supported */ unsigned long loops_per_jiffy; + __u8 apicid; + __u8 booted_cores; /* number of cores as seen by OS */ } ____cacheline_aligned; #define X86_VENDOR_INTEL 0 diff --git a/include/linux/bitops.h b/include/linux/bitops.h index cb3c3ef..38c2fb7 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -84,6 +84,16 @@ static __inline__ int get_bitmask_order(unsigned int count) return order; /* We could be slightly more clever with -1 here... */ } +static __inline__ int get_count_order(unsigned int count) +{ + int order; + + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + /* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word -- cgit v1.1 From a6f5deb2be4c82f24fefadcbf7e448f540c05ae6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Reduce number of retries for reset through keyboard controller Old code could retry for 10 seconds worst time. Only try it for one second now. Suggested by Yinghai Lu Cc: Yinghai.Lu@amd.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-i386/mach-default/mach_reboot.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h index 06ae4d8..a955e57 100644 --- a/include/asm-i386/mach-default/mach_reboot.h +++ b/include/asm-i386/mach-default/mach_reboot.h @@ -19,7 +19,7 @@ static inline void kb_wait(void) static inline void mach_reboot(void) { int i; - for (i = 0; i < 100; i++) { + for (i = 0; i < 10; i++) { kb_wait(); udelay(50); outb(0x60, 0x64); /* write Controller Command Byte */ -- cgit v1.1 From 8e0d4f4e9132ae6e353f9cf27261627bcc7c65cc Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Remove asm-x86_64/rwsem.h Not needed since x86-64 always uses the spinlock based rwsems. 
Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/rwsem.h | 283 --------------------------------------------- 1 file changed, 283 deletions(-) delete mode 100644 include/asm-x86_64/rwsem.h (limited to 'include') diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h deleted file mode 100644 index 46077e9..0000000 --- a/include/asm-x86_64/rwsem.h +++ /dev/null @@ -1,283 +0,0 @@ -/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+ - * - * Written by David Howells (dhowells@redhat.com). - * Ported by Andi Kleen to x86-64. - * - * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h - * - * - * The MSW of the count is the negated number of active writers and waiting - * lockers, and the LSW is the total number of active locks - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an - * uncontended lock. This can be determined because XADD returns the old value. - * Readers increment by 1 and see a positive value when uncontended, negative - * if there are writers (and maybe) readers waiting (in which case it goes to - * sleep). - * - * The value of WAITING_BIAS supports up to 32766 waiting processes. This can - * be extended to 65534 by manually checking the whole MSW rather than relying - * on the S flag. - * - * The value of ACTIVE_BIAS supports up to 65535 active processes. - * - * This should be totally fair - if anything is waiting, a process that wants a - * lock will go to the back of the queue. When the currently active lock is - * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consecutive readers at the - * front, then they'll all be woken up, but no other readers will be. 
- */ - -#ifndef _X8664_RWSEM_H -#define _X8664_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ - -#include -#include - -struct rwsem_waiter; - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed int count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif -} - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */ - " js 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - LOCK_SECTION_START("") \ - "2:\n\t" - " call rwsem_down_read_failed_thunk\n\t" - " jmp 1b\n" - LOCK_SECTION_END \ - "# ending down_read\n\t" - : "+m"(sem->count) - : "D"(sem) - : "memory", "cc"); -} - - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline int __down_read_trylock(struct rw_semaphore *sem) -{ - __s32 result, tmp; - __asm__ __volatile__( - "# beginning __down_read_trylock\n\t" - " movl %0,%1\n\t" - "1:\n\t" - " movl %1,%2\n\t" - " addl %3,%2\n\t" - " jle 2f\n\t" -LOCK_PREFIX " cmpxchgl %2,%0\n\t" - " jnz 1b\n\t" - "2:\n\t" - "# ending __down_read_trylock\n\t" - : "+m"(sem->count), "=&a"(result), "=&r"(tmp) - : "i"(RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); - return result>=0 ? 1 : 0; -} - - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - int tmp; - - tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */ - " testl %0,%0\n\t" /* was the count 0 before? 
*/ - " jnz 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " call rwsem_down_write_failed_thunk\n\t" - " jmp 1b\n" - LOCK_SECTION_END - "# ending down_write" - : "=&r" (tmp) - : "0"(tmp), "D"(sem) - : "memory", "cc"); -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - signed long ret = cmpxchg(&sem->count, - RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - if (ret == RWSEM_UNLOCKED_VALUE) - return 1; - return 0; -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */ - " jnz 1b\n\t" - " call rwsem_wake_thunk\n\t" - " jmp 1b\n" - LOCK_SECTION_END - "# ending __up_read\n" - : "+m"(sem->count), [tmp] "+r" (tmp) - : "D"(sem) - : "memory", "cc"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - unsigned tmp; - __asm__ __volatile__( - "# beginning __up_write\n\t" - " movl %[bias],%[tmp]\n\t" -LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jnz 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " decw %w[tmp]\n\t" /* did the active count reduce to 0? */ - " jnz 1b\n\t" /* jump back if not */ - " call rwsem_wake_thunk\n\t" - " jmp 1b\n" - LOCK_SECTION_END - "# ending __up_write\n" - : "+m"(sem->count), [tmp] "=r" (tmp) - : "D"(sem), [bias] "i"(-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc"); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __downgrade_write\n\t" -LOCK_PREFIX " addl %[bias],(%%rdi)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - LOCK_SECTION_START("") - "2:\n\t" - " call rwsem_downgrade_thunk\n" - " jmp 1b\n" - LOCK_SECTION_END - "# ending __downgrade_write\n" - : "=m"(sem->count) - : "D"(sem), [bias] "i"(-RWSEM_WAITING_BIAS), "m"(sem->count) - : "memory", "cc"); -} - -/* - * implement atomic add functionality - */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) -{ - __asm__ __volatile__( -LOCK_PREFIX "addl %1,%0" - :"=m"(sem->count) - :"ir"(delta), "m"(sem->count)); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int tmp = delta; - - __asm__ __volatile__( -LOCK_PREFIX "xaddl %0,(%2)" - : "=r"(tmp), "=m"(sem->count) - : "r"(sem), "m"(sem->count), "0" (tmp) - : "memory"); - - return tmp+delta; -} - -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - -#endif /* __KERNEL__ */ -#endif /* _X8664_RWSEM_H */ -- cgit v1.1 From bf0f2e23834e2bf7d64b467ef07095b1c7e2c04b Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Set ____cacheline_maxaligned_in_smp alignment to 128 bytes The current value was correct before the introduction of Intel EM64T support - but now L1_CACHE_SHIFT_MAX can be less 
than L1_CACHE_SHIFT, which _is_ funny! Between the few users of ____cacheline_maxaligned_in_smp, we also have (for example) rcu_ctrlblk, and struct zone, with zone->{lru_,}lock. I.e. we have a lot of excess cacheline bouncing on them. No correctness issues, obviously. So this could even be merged for 2.6.14 (I'm not a fan of this idea, though). CC: Andi Kleen Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/cache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h index eda62ba..33e5342 100644 --- a/include/asm-x86_64/cache.h +++ b/include/asm-x86_64/cache.h @@ -9,6 +9,6 @@ /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define L1_CACHE_SHIFT_MAX 6 /* largest L1 which this arch supports */ +#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ #endif -- cgit v1.1 From efbbdce94f6ea54cf06d9a06e4c95f6874ad64a8 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Use common sys_time64 Keeping this function does not make sense because it's just a (buggy) copy of sys_time. The only difference is that now.tv_sec (which is a time_t, i.e. a 64-bit long) is copied (and truncated) into an int (32-bit). The prototype is the same (they both take a long __user *), so let's drop this and redirect it to sys_time (and make sure it exists by defining __ARCH_WANT_SYS_TIME). The only disadvantage is that the sys_stime definition is also compiled (this may be fixed if needed by adding a separate __ARCH_WANT_SYS_STIME macro, and defining it for all arches defining __ARCH_WANT_SYS_TIME except x86_64). Acked-by: Andi Kleen Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/unistd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 3c494b6..2c42150 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -462,7 +462,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_tkill 200 __SYSCALL(__NR_tkill, sys_tkill) #define __NR_time 201 -__SYSCALL(__NR_time, sys_time64) +__SYSCALL(__NR_time, sys_time) #define __NR_futex 202 __SYSCALL(__NR_futex, sys_futex) #define __NR_sched_setaffinity 203 @@ -608,6 +608,7 @@ do { \ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_TIME #endif -- cgit v1.1 From 8893166ff8694f36655009aa9bf8e7f2e1c9339f Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 5 Nov 2005 17:25:54 +0100 Subject: [PATCH] x86_64: Increase the maximum number of local APICs to the maximum This is needed for large multinode IBM systems which have a sparse APIC space in clustered mode, fully covering the available 8 bits. The previous kernels would limit the local APIC number to 127, which caused it to reject some of the CPUs at boot.
From 8893166ff8694f36655009aa9bf8e7f2e1c9339f Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Sat, 5 Nov 2005 17:25:54 +0100
Subject: [PATCH] x86_64: Increase the maximum number of local APICs to the maximum

This is needed for large multinode IBM systems which have a sparse APIC space
in clustered mode, fully covering the available 8 bits. The previous kernels
would limit the local APIC number to 127, which caused them to reject some of
the CPUs at boot.

I increased the maximum and shrunk the apic_version array a bit to make up
for that (the version is only 8 bits, so we don't need a full int to store
it).

Cc: Chris McDermott
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 include/asm-x86_64/mpspec.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 6b37538..6f8a17d 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -16,7 +16,7 @@
 /*
  * A maximum of 255 APICs with the current APIC ID architecture.
  */
-#define MAX_APICS 128
+#define MAX_APICS 255
 struct intel_mp_floating
 {
@@ -173,7 +173,7 @@ extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
 extern int nr_ioapics;
-extern int apic_version [MAX_APICS];
+extern unsigned char apic_version [MAX_APICS];
 extern int mp_irq_entries;
 extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
 extern int mpc_default_type;
--
cgit v1.1

From cf225356578326308b16a0fd03ff3fa72fe3da07 Mon Sep 17 00:00:00 2001
From: Jochen Friedrich
Date: Mon, 14 Nov 2005 21:58:18 -0800
Subject: [LLC]: Fix typo

Signed-off-by: Jochen Friedrich
Acked-by: Arnaldo Carvalho de Melo
Signed-off-by: David S. Miller
---
 include/net/llc_pdu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c7a9594..8f63065 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -357,7 +357,7 @@ static inline void llc_pdu_init_as_test_rsp(struct sk_buff *skb,
 /* LLC Type 1 XID command/response information fields format */
 struct llc_xid_info {
- u8 fmt_id; /* always 0x18 for LLC */
+ u8 fmt_id; /* always 0x81 for LLC */
 u8 type; /* different if NULL/non-NULL LSAP */
 u8 rw; /* sender receive window */
 };
--
cgit v1.1

From d4ed803c564701eae9534ab26a86ddb06acaf49c Mon Sep 17 00:00:00 2001
From: Harald Welte
Date: Tue, 15 Nov 2005 00:09:06 -0800
Subject: [PATCH] Make sysctl.h (again) usable from userspace

Make sysctl.h (again) usable from userspace.

Signed-off-by: Harald Welte
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/sysctl.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 64f203c..6bc03c9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -20,7 +20,6 @@
 #include
 #include
-#include
 #include
 struct file;
@@ -859,6 +858,7 @@ enum
 };
 #ifdef __KERNEL__
+#include
 extern void sysctl_init(void);
--
cgit v1.1
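
The sysctl.h change above follows the usual convention for headers that are also read by user space: kernel-only includes and prototypes stay behind #ifdef __KERNEL__ so a userspace build never pulls in headers it cannot resolve. A generic sketch of that pattern follows; the example_uapi.h, example_req and example_init names are hypothetical and this is not the actual sysctl.h content.

/* example_uapi.h -- hypothetical header shared with userspace */
#ifndef _EXAMPLE_UAPI_H
#define _EXAMPLE_UAPI_H

/* Plain constants and structure layouts are visible to everyone. */
#define EXAMPLE_MAX_NAME 32

struct example_req {
        char name[EXAMPLE_MAX_NAME];
        int  value;
};

#ifdef __KERNEL__
/* Kernel-only includes and prototypes live under the guard. */
#include <linux/types.h>

extern void example_init(void);
#endif /* __KERNEL__ */

#endif /* _EXAMPLE_UAPI_H */
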
From f4eeb0a20f017fd8bc849cc50469c2e2e6a0c05c Mon Sep 17 00:00:00 2001
From: Miles Bader
Date: Tue, 15 Nov 2005 00:09:17 -0800
Subject: [PATCH] v850: Add missing include in hardirq.h

Signed-off-by: Miles Bader
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-v850/hardirq.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'include')

diff --git a/include/asm-v850/hardirq.h b/include/asm-v850/hardirq.h
index 5dfca80..4f913bc 100644
--- a/include/asm-v850/hardirq.h
+++ b/include/asm-v850/hardirq.h
@@ -5,6 +5,8 @@
 #include
 #include
+#include
+
 typedef struct {
 unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
--
cgit v1.1

From 0c53508980a95b84c296c4336a831776cc22cf58 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 15 Nov 2005 00:09:18 -0800
Subject: [PATCH] v850: use generic hardirq code

Signed-off-by: Christoph Hellwig
Acked-by: Miles Bader
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-v850/hardirq.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'include')

diff --git a/include/asm-v850/hardirq.h b/include/asm-v850/hardirq.h
index 4f913bc..d98488c 100644
--- a/include/asm-v850/hardirq.h
+++ b/include/asm-v850/hardirq.h
@@ -24,4 +24,6 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
+void ack_bad_irq(unsigned int irq);
+
 #endif /* __V850_HARDIRQ_H__ */
--
cgit v1.1

From 31f3426904e066f17e3f88c468a2f7c869ad4aac Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Tue, 15 Nov 2005 15:17:10 -0800
Subject: [TCP]: More spelling fixes.

From Joe Perches

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 include/net/tcp.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0f98480..d78025f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,8 +552,8 @@ extern u32 __tcp_select_window(struct sock *sk);
 /* TCP timestamps are only 32-bits, this causes a slight
  * complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below. We decidedly
- * only use of the low 32-bits of jiffies and hide the ugly
+ * of jiffies in the buffer control blocks below. We decided
+ * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
 #define tcp_time_stamp ((__u32)(jiffies))
--
cgit v1.1
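
As a side note on the comment being corrected, the cast it refers to simply keeps the low 32 bits of jiffies. The stand-alone demo below is user-space C with a made-up 64-bit tick value (assuming a 64-bit build), not kernel code, and only illustrates that narrowing.

#include <stdio.h>

int main(void)
{
        unsigned long ticks = 0x123456789abcdefUL;  /* made-up 64-bit tick count */
        unsigned int ts = (unsigned int)ticks;      /* keep only the low 32 bits */

        printf("ticks = %#lx, 32-bit timestamp = %#x\n", ticks, ts);
        /* TCP timestamps are 32 bits on the wire, so only the low half is
         * stored; wraparound is handled with sequence-number style compares. */
        return 0;
}
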