path: root/arch/ia64/hp
author    Matt LaPlante <kernel1@cyberdogtech.com>  2006-11-30 05:24:39 +0100
committer Adrian Bunk <bunk@stusta.de>              2006-11-30 05:24:39 +0100
commit    0779bf2d2ecc4d9b1e9437ae659f50e6776a7666 (patch)
tree      dbcc9735ab63a833056572c8f4f0efe911246562 /arch/ia64/hp
parent    3cb2fccc5f48a4d6269dfd00b4db570fca2a04d5 (diff)
Fix misc .c/.h comment typos
Fix various .c/.h typos in comments (no code changes).

Signed-off-by: Matt LaPlante <kernel1@cyberdogtech.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Diffstat (limited to 'arch/ia64/hp')
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  8
1 files changed, 4 insertions, 4 deletions
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index db8e1fc..14691cd 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -75,7 +75,7 @@
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
-** particularly agressive, this option will keep the entire pdir valid such
+** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
@@ -258,10 +258,10 @@ static u64 prefetch_spill_page;
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
-** (or rather not merge) DMA's into managable chunks.
+** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
-** rather than the HW. I/O MMU allocation alogorithms can be
-** faster with smaller size is (to some degree).
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)