diff --git a/.mailmap b/.mailmap index dfab12f809ed9c678638844ced3717742986ab26..eba9bf953ef5bc2e21bc670d33c845a8483e20a6 100644 --- a/.mailmap +++ b/.mailmap @@ -66,6 +66,7 @@ Kenneth W Chen Koushik Leonid I Ananiev Linas Vepstas +Mark Brown Matthieu CASTET Michael Buesch Michael Buesch diff --git a/CREDITS b/CREDITS index c62dcb3b7e2621d918815a656f2682fda044afcb..2358846f06be53807ccbd54b2e3615eb5814c4dc 100644 --- a/CREDITS +++ b/CREDITS @@ -1653,14 +1653,14 @@ S: Chapel Hill, North Carolina 27514-4818 S: USA N: Dave Jones -E: davej@codemonkey.org.uk +E: davej@redhat.com W: http://www.codemonkey.org.uk -D: x86 errata/setup maintenance. -D: AGPGART driver. +D: Assorted VIA x86 support. +D: 2.5 AGPGART overhaul. D: CPUFREQ maintenance. -D: Backport/Forwardport merge monkey. -D: Various Janitor work. -S: United Kingdom +D: Fedora kernel maintainence. +D: Misc/Other. +S: 314 Littleton Rd, Westford, MA 01886, USA N: Martin Josfsson E: gandalf@wlug.westbo.se diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl index 4c63e5864160f159fd073039ba80efefb161ff68..ae15d55350ec8bf93e2eb79dac2ee730cd6de054 100644 --- a/Documentation/DocBook/kernel-hacking.tmpl +++ b/Documentation/DocBook/kernel-hacking.tmpl @@ -1105,7 +1105,7 @@ static struct block_device_operations opt_fops = { - Function names as strings (__FUNCTION__). + Function names as strings (__func__). diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt index a51f693c15419e9b2da87df025b69d898f648b51..256defd7e1742be45497e1bcc8912d493241c5af 100644 --- a/Documentation/MSI-HOWTO.txt +++ b/Documentation/MSI-HOWTO.txt @@ -236,10 +236,8 @@ software system can set different pages for controlling accesses to the MSI-X structure. The implementation of MSI support requires the PCI subsystem, not a device driver, to maintain full control of the MSI-X table/MSI-X PBA (Pending Bit Array) and MMIO address space of the MSI-X -table/MSI-X PBA. A device driver is prohibited from requesting the MMIO -address space of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem -will fail enabling MSI-X on its hardware device when it calls the function -pci_enable_msix(). +table/MSI-X PBA. A device driver should not access the MMIO address +space of the MSI-X table/MSI-X PBA. 5.3.2 API pci_enable_msix diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt index 8d4dc6250c582821ccca5b89127833e2b14bd4ee..fd4907a2968cb3fab71e92286c24da0244dcfcae 100644 --- a/Documentation/PCI/pci.txt +++ b/Documentation/PCI/pci.txt @@ -163,6 +163,10 @@ need pass only as many optional fields as necessary: o class and classmask fields default to 0 o driver_data defaults to 0UL. +Note that driver_data must match the value used by any of the pci_device_id +entries defined in the driver. This makes the driver_data field mandatory +if all the pci_device_id entries have a non-zero driver_data value. + Once added, the driver probe routine will be invoked for any unclaimed PCI devices listed in its (newly updated) pci_ids list. diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt index 16c251230c82398a1ad26a3754762e49fc819922..ddeb14beacc8fe8592334ba11303ef6fcd2a7404 100644 --- a/Documentation/PCI/pcieaer-howto.txt +++ b/Documentation/PCI/pcieaer-howto.txt @@ -203,22 +203,17 @@ to mmio_enabled. 3.3 helper functions -3.3.1 int pci_find_aer_capability(struct pci_dev *dev); -pci_find_aer_capability locates the PCI Express AER capability -in the device configuration space. 
If the device doesn't support -PCI-Express AER, the function returns 0. - -3.3.2 int pci_enable_pcie_error_reporting(struct pci_dev *dev); +3.3.1 int pci_enable_pcie_error_reporting(struct pci_dev *dev); pci_enable_pcie_error_reporting enables the device to send error messages to root port when an error is detected. Note that devices don't enable the error reporting by default, so device drivers need call this function to enable it. -3.3.3 int pci_disable_pcie_error_reporting(struct pci_dev *dev); +3.3.2 int pci_disable_pcie_error_reporting(struct pci_dev *dev); pci_disable_pcie_error_reporting disables the device to send error messages to root port when an error is detected. -3.3.4 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +3.3.3 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); pci_cleanup_aer_uncorrect_error_status cleanups the uncorrectable error status register. diff --git a/Documentation/cgroups.txt b/Documentation/cgroups/cgroups.txt similarity index 100% rename from Documentation/cgroups.txt rename to Documentation/cgroups/cgroups.txt diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt new file mode 100644 index 0000000000000000000000000000000000000000..c50ab58b72ebc26e0b03124f970e4d038f6df39e --- /dev/null +++ b/Documentation/cgroups/freezer-subsystem.txt @@ -0,0 +1,99 @@ + The cgroup freezer is useful to batch job management system which start +and stop sets of tasks in order to schedule the resources of a machine +according to the desires of a system administrator. This sort of program +is often used on HPC clusters to schedule access to the cluster as a +whole. The cgroup freezer uses cgroups to describe the set of tasks to +be started/stopped by the batch job management system. It also provides +a means to start and stop the tasks composing the job. + + The cgroup freezer will also be useful for checkpointing running groups +of tasks. The freezer allows the checkpoint code to obtain a consistent +image of the tasks by attempting to force the tasks in a cgroup into a +quiescent state. Once the tasks are quiescent another task can +walk /proc or invoke a kernel interface to gather information about the +quiesced tasks. Checkpointed tasks can be restarted later should a +recoverable error occur. This also allows the checkpointed tasks to be +migrated between nodes in a cluster by copying the gathered information +to another node and restarting the tasks there. + + Sequences of SIGSTOP and SIGCONT are not always sufficient for stopping +and resuming tasks in userspace. Both of these signals are observable +from within the tasks we wish to freeze. While SIGSTOP cannot be caught, +blocked, or ignored it can be seen by waiting or ptracing parent tasks. +SIGCONT is especially unsuitable since it can be caught by the task. Any +programs designed to watch for SIGSTOP and SIGCONT could be broken by +attempting to use SIGSTOP and SIGCONT to stop and resume tasks. We can +demonstrate this problem using nested bash shells: + + $ echo $$ + 16644 + $ bash + $ echo $$ + 16690 + + From a second, unrelated bash shell: + $ kill -SIGSTOP 16690 + $ kill -SIGCONT 16990 + + + + This happens because bash can observe both signals and choose how it +responds to them. + + Another example of a program which catches and responds to these +signals is gdb. In fact any program designed to use ptrace is likely to +have a problem with this method of stopping and resuming tasks. 
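+
+ As a further illustration, a minimal C program along the following
+lines (purely hypothetical, shown here only to make the point, and using
+nothing beyond standard signal(2)/pause(2)) can observe every
+signal-based "thaw" from inside the task, because its SIGCONT handler
+runs each time the task is resumed:
+
+	#include <signal.h>
+	#include <unistd.h>
+
+	static void on_cont(int sig)
+	{
+		(void)sig;
+		/* write() is async-signal-safe; the task sees its own resume */
+		write(STDOUT_FILENO, "caught SIGCONT\n", 15);
+	}
+
+	int main(void)
+	{
+		signal(SIGCONT, on_cont);	/* catch SIGCONT */
+		for (;;)
+			pause();		/* sleep until a signal arrives */
+	}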
+ + In contrast, the cgroup freezer uses the kernel freezer code to +prevent the freeze/unfreeze cycle from becoming visible to the tasks +being frozen. This allows the bash example above and gdb to run as +expected. + + The freezer subsystem in the container filesystem defines a file named +freezer.state. Writing "FROZEN" to the state file will freeze all tasks in the +cgroup. Subsequently writing "THAWED" will unfreeze the tasks in the cgroup. +Reading will return the current state. + +* Examples of usage : + + # mkdir /containers/freezer + # mount -t cgroup -ofreezer freezer /containers + # mkdir /containers/0 + # echo $some_pid > /containers/0/tasks + +to get status of the freezer subsystem : + + # cat /containers/0/freezer.state + THAWED + +to freeze all tasks in the container : + + # echo FROZEN > /containers/0/freezer.state + # cat /containers/0/freezer.state + FREEZING + # cat /containers/0/freezer.state + FROZEN + +to unfreeze all tasks in the container : + + # echo THAWED > /containers/0/freezer.state + # cat /containers/0/freezer.state + THAWED + +This is the basic mechanism which should do the right thing for user space task +in a simple scenario. + +It's important to note that freezing can be incomplete. In that case we return +EBUSY. This means that some tasks in the cgroup are busy doing something that +prevents us from completely freezing the cgroup at this time. After EBUSY, +the cgroup will remain partially frozen -- reflected by freezer.state reporting +"FREEZING" when read. The state will remain "FREEZING" until one of these +things happens: + + 1) Userspace cancels the freezing operation by writing "THAWED" to + the freezer.state file + 2) Userspace retries the freezing operation by writing "FROZEN" to + the freezer.state file (writing "FREEZING" is not legal + and returns EIO) + 3) The tasks that blocked the cgroup from entering the "FROZEN" + state disappear from the cgroup's set of tasks. diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt index 9b53d5827361fd647f3388212e648f502defc698..1c07547d3f81f28a19edb9de8752f8865b44478d 100644 --- a/Documentation/controllers/memory.txt +++ b/Documentation/controllers/memory.txt @@ -112,14 +112,22 @@ the per cgroup LRU. 2.2.1 Accounting details -All mapped pages (RSS) and unmapped user pages (Page Cache) are accounted. -RSS pages are accounted at the time of page_add_*_rmap() unless they've already -been accounted for earlier. A file page will be accounted for as Page Cache; -it's mapped into the page tables of a process, duplicate accounting is carefully -avoided. Page Cache pages are accounted at the time of add_to_page_cache(). -The corresponding routines that remove a page from the page tables or removes -a page from Page Cache is used to decrement the accounting counters of the -cgroup. +All mapped anon pages (RSS) and cache pages (Page Cache) are accounted. +(some pages which never be reclaimable and will not be on global LRU + are not accounted. we just accounts pages under usual vm management.) + +RSS pages are accounted at page_fault unless they've already been accounted +for earlier. A file page will be accounted for as Page Cache when it's +inserted into inode (radix-tree). While it's mapped into the page tables of +processes, duplicate accounting is carefully avoided. + +A RSS page is unaccounted when it's fully unmapped. A PageCache page is +unaccounted when it's removed from radix-tree. + +At page migration, accounting information is kept. 
+ +Note: we just account pages-on-lru because our purpose is to control amount +of used pages. not-on-lru pages are tend to be out-of-control from vm view. 2.3 Shared Page Accounting diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt index 47e568a9370afa28f703acf9ce55f88a8ca52519..5c86c258c7913dd9c97da4a3e4f7a0dd666745bc 100644 --- a/Documentation/cpusets.txt +++ b/Documentation/cpusets.txt @@ -48,7 +48,7 @@ hooks, beyond what is already present, required to manage dynamic job placement on large systems. Cpusets use the generic cgroup subsystem described in -Documentation/cgroup.txt. +Documentation/cgroups/cgroups.txt. Requests by a task, using the sched_setaffinity(2) system call to include CPUs in its CPU affinity mask, and using the mbind(2) and diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt index 295f26cd895a0d5cd460267a84ebbb687978fa69..9dd2a3bb2acc87b85d473a2080284910f04f8585 100644 --- a/Documentation/filesystems/ext3.txt +++ b/Documentation/filesystems/ext3.txt @@ -96,6 +96,11 @@ errors=remount-ro(*) Remount the filesystem read-only on an error. errors=continue Keep going on a filesystem error. errors=panic Panic and halt the machine if an error occurs. +data_err=ignore(*) Just print an error message if an error occurs + in a file data buffer in ordered mode. +data_err=abort Abort the journal if an error occurs in a file + data buffer in ordered mode. + grpid Give objects the same group ID as their creator. bsdgroups diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index c032bf39e8b9b581b6ada65f7a99b97b2dae7019..bcceb99b81dd8038130fd9b1032b3e4ba80dc113 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -1384,15 +1384,18 @@ causes the kernel to prefer to reclaim dentries and inodes. dirty_background_ratio ---------------------- -Contains, as a percentage of total system memory, the number of pages at which -the pdflush background writeback daemon will start writing out dirty data. +Contains, as a percentage of the dirtyable system memory (free pages + mapped +pages + file cache, not including locked pages and HugePages), the number of +pages at which the pdflush background writeback daemon will start writing out +dirty data. dirty_ratio ----------------- -Contains, as a percentage of total system memory, the number of pages at which -a process which is generating disk writes will itself start writing out dirty -data. +Contains, as a percentage of the dirtyable system memory (free pages + mapped +pages + file cache, not including locked pages and HugePages), the number of +pages at which a process which is generating disk writes will itself start +writing out dirty data. dirty_writeback_centisecs ------------------------- @@ -2412,24 +2415,29 @@ will be dumped when the process is dumped. coredump_filter is a bitmask of memory types. If a bit of the bitmask is set, memory segments of the corresponding memory type are dumped, otherwise they are not dumped. 
-The following 4 memory types are supported: +The following 7 memory types are supported: - (bit 0) anonymous private memory - (bit 1) anonymous shared memory - (bit 2) file-backed private memory - (bit 3) file-backed shared memory - (bit 4) ELF header pages in file-backed private memory areas (it is effective only if the bit 2 is cleared) + - (bit 5) hugetlb private memory + - (bit 6) hugetlb shared memory Note that MMIO pages such as frame buffer are never dumped and vDSO pages are always dumped regardless of the bitmask status. -Default value of coredump_filter is 0x3; this means all anonymous memory -segments are dumped. + Note bit 0-4 doesn't effect any hugetlb memory. hugetlb memory are only + effected by bit 5-6. + +Default value of coredump_filter is 0x23; this means all anonymous memory +segments and hugetlb private memory are dumped. If you don't want to dump all shared memory segments attached to pid 1234, -write 1 to the process's proc file. +write 0x21 to the process's proc file. - $ echo 0x1 > /proc/1234/coredump_filter + $ echo 0x21 > /proc/1234/coredump_filter When a new process is created, the process inherits the bitmask status from its parent. It is useful to set up coredump_filter before the program runs. diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 6a0d70a22f05f238cb1da86208437818f192b495..dd84ea3c10daf00cf78a2361dce9f8b691095e4d 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -86,6 +86,15 @@ norm_unmount (*) commit on unmount; the journal is committed fast_unmount do not commit on unmount; this option makes unmount faster, but the next mount slower because of the need to replay the journal. +bulk_read read more in one go to take advantage of flash + media that read faster sequentially +no_bulk_read (*) do not bulk-read +no_chk_data_crc skip checking of CRCs on data nodes in order to + improve read performance. Use this option only + if the flash media is highly reliable. The effect + of this option is that corruption of the contents + of a file can go unnoticed. +chk_data_crc (*) do not skip checking CRCs on data nodes Quick usage instructions diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index d4f4875fc7c6d2e463e9c2b6e26d2d278cf1399c..53ba7c7d82b342d50053f6458225b29123bee747 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -101,6 +101,7 @@ parameter is applicable: X86-64 X86-64 architecture is enabled. More X86-64 boot options can be found in Documentation/x86_64/boot-options.txt . + X86 Either 32bit or 64bit x86 (same as X86-32+X86-64) In addition, the following text indicates that the option: @@ -690,7 +691,7 @@ and is between 256 and 4096 characters. It is defined in the file See Documentation/block/as-iosched.txt and Documentation/block/deadline-iosched.txt for details. - elfcorehdr= [X86-32, X86_64] + elfcorehdr= [IA64,PPC,SH,X86-32,X86_64] Specifies physical address of start of kernel core image elf header. Generally kexec loader will pass this option to capture kernel. @@ -796,6 +797,8 @@ and is between 256 and 4096 characters. It is defined in the file Defaults to the default architecture's huge page size if not specified. + hlt [BUGS=ARM,SH] + i8042.debug [HW] Toggle i8042 debug mode i8042.direct [HW] Put keyboard port into non-translated mode i8042.dumbkbd [HW] Pretend that controller can only read data from @@ -1211,6 +1214,10 @@ and is between 256 and 4096 characters. 
It is defined in the file mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel memory. + memchunk=nn[KMG] + [KNL,SH] Allow user to override the default size for + per-device physically contiguous DMA buffers. + memmap=exactmap [KNL,X86-32,X86_64] Enable setting of an exact E820 memory map, as specified by the user. Such memmap=exactmap lines can be constructed based on @@ -1393,6 +1400,8 @@ and is between 256 and 4096 characters. It is defined in the file nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects. + nodsp [SH] Disable hardware DSP at boot time. + noefi [X86-32,X86-64] Disable EFI runtime services support. noexec [IA-64] @@ -1409,13 +1418,15 @@ and is between 256 and 4096 characters. It is defined in the file noexec32=off: disable non-executable mappings read implies executable mappings + nofpu [SH] Disable hardware FPU at boot time. + nofxsr [BUGS=X86-32] Disables x86 floating point extended register save and restore. The kernel will only save legacy floating-point registers on task switch. noclflush [BUGS=X86] Don't use the CLFLUSH instruction - nohlt [BUGS=ARM] + nohlt [BUGS=ARM,SH] no-hlt [BUGS=X86-32] Tells the kernel that the hlt instruction doesn't work correctly and not to @@ -1578,7 +1589,7 @@ and is between 256 and 4096 characters. It is defined in the file See also Documentation/paride.txt. pci=option[,option...] [PCI] various PCI subsystem options: - off [X86-32] don't probe for the PCI bus + off [X86] don't probe for the PCI bus bios [X86-32] force use of PCI BIOS, don't access the hardware directly. Use this if your machine has a non-standard PCI host bridge. @@ -1586,9 +1597,9 @@ and is between 256 and 4096 characters. It is defined in the file hardware access methods are allowed. Use this if you experience crashes upon bootup and you suspect they are caused by the BIOS. - conf1 [X86-32] Force use of PCI Configuration + conf1 [X86] Force use of PCI Configuration Mechanism 1. - conf2 [X86-32] Force use of PCI Configuration + conf2 [X86] Force use of PCI Configuration Mechanism 2. noaer [PCIE] If the PCIEAER kernel config parameter is enabled, this kernel boot option can be used to @@ -1608,37 +1619,37 @@ and is between 256 and 4096 characters. It is defined in the file this option if the kernel is unable to allocate IRQs or discover secondary PCI buses on your motherboard. - rom [X86-32] Assign address space to expansion ROMs. + rom [X86] Assign address space to expansion ROMs. Use with caution as certain devices share address decoders between ROMs and other resources. - norom [X86-32,X86_64] Do not assign address space to + norom [X86] Do not assign address space to expansion ROMs that do not already have BIOS assigned address ranges. - irqmask=0xMMMM [X86-32] Set a bit mask of IRQs allowed to be + irqmask=0xMMMM [X86] Set a bit mask of IRQs allowed to be assigned automatically to PCI devices. You can make the kernel exclude IRQs of your ISA cards this way. - pirqaddr=0xAAAAA [X86-32] Specify the physical address + pirqaddr=0xAAAAA [X86] Specify the physical address of the PIRQ table (normally generated by the BIOS) if it is outside the F0000h-100000h range. - lastbus=N [X86-32] Scan all buses thru bus #N. Can be + lastbus=N [X86] Scan all buses thru bus #N. Can be useful if the kernel is unable to find your secondary buses and you want to tell it explicitly which ones they are. - assign-busses [X86-32] Always assign all PCI bus + assign-busses [X86] Always assign all PCI bus numbers ourselves, overriding whatever the firmware may have done. 
- usepirqmask [X86-32] Honor the possible IRQ mask stored + usepirqmask [X86] Honor the possible IRQ mask stored in the BIOS $PIR table. This is needed on some systems with broken BIOSes, notably some HP Pavilion N5400 and Omnibook XE3 notebooks. This will have no effect if ACPI IRQ routing is enabled. - noacpi [X86-32] Do not use ACPI for IRQ routing + noacpi [X86] Do not use ACPI for IRQ routing or for PCI scanning. - use_crs [X86-32] Use _CRS for PCI resource + use_crs [X86] Use _CRS for PCI resource allocation. routeirq Do IRQ routing for all PCI devices. This is normally done in pci_enable_device(), @@ -1667,6 +1678,12 @@ and is between 256 and 4096 characters. It is defined in the file reserved for the CardBus bridge's memory window. The default value is 64 megabytes. + pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power + Management. + off Disable ASPM. + force Enable ASPM even on devices that claim not to support it. + WARNING: Forcing ASPM on may cause system lockups. + pcmv= [HW,PCMCIA] BadgePAD 4 pd. [PARIDE] diff --git a/Documentation/markers.txt b/Documentation/markers.txt index d9f50a19fa0c48185968e875aa2c9163bae3e6fa..089f6138fcd94249a6444ca3a932a50c263098e1 100644 --- a/Documentation/markers.txt +++ b/Documentation/markers.txt @@ -50,10 +50,12 @@ Connecting a function (probe) to a marker is done by providing a probe (function to call) for the specific marker through marker_probe_register() and can be activated by calling marker_arm(). Marker deactivation can be done by calling marker_disarm() as many times as marker_arm() has been called. Removing a probe -is done through marker_probe_unregister(); it will disarm the probe and make -sure there is no caller left using the probe when it returns. Probe removal is -preempt-safe because preemption is disabled around the probe call. See the -"Probe example" section below for a sample probe module. +is done through marker_probe_unregister(); it will disarm the probe. +marker_synchronize_unregister() must be called before the end of the module exit +function to make sure there is no caller left using the probe. This, and the +fact that preemption is disabled around the probe call, make sure that probe +removal and module unload are safe. See the "Probe example" section below for a +sample probe module. The marker mechanism supports inserting multiple instances of the same marker. Markers can be put in inline functions, inlined static functions, and diff --git a/Documentation/mtd/nand_ecc.txt b/Documentation/mtd/nand_ecc.txt new file mode 100644 index 0000000000000000000000000000000000000000..bdf93b7f0f2448bf3a2fd04d51cba13fe1e8029f --- /dev/null +++ b/Documentation/mtd/nand_ecc.txt @@ -0,0 +1,714 @@ +Introduction +============ + +Having looked at the linux mtd/nand driver and more specific at nand_ecc.c +I felt there was room for optimisation. I bashed the code for a few hours +performing tricks like table lookup removing superfluous code etc. +After that the speed was increased by 35-40%. +Still I was not too happy as I felt there was additional room for improvement. + +Bad! I was hooked. +I decided to annotate my steps in this file. Perhaps it is useful to someone +or someone learns something from it. + + +The problem +=========== + +NAND flash (at least SLC one) typically has sectors of 256 bytes. +However NAND flash is not extremely reliable so some error detection +(and sometimes correction) is needed. + +This is done by means of a Hamming code. 
I'll try to explain it in +laymans terms (and apologies to all the pro's in the field in case I do +not use the right terminology, my coding theory class was almost 30 +years ago, and I must admit it was not one of my favourites). + +As I said before the ecc calculation is performed on sectors of 256 +bytes. This is done by calculating several parity bits over the rows and +columns. The parity used is even parity which means that the parity bit = 1 +if the data over which the parity is calculated is 1 and the parity bit = 0 +if the data over which the parity is calculated is 0. So the total +number of bits over the data over which the parity is calculated + the +parity bit is even. (see wikipedia if you can't follow this). +Parity is often calculated by means of an exclusive or operation, +sometimes also referred to as xor. In C the operator for xor is ^ + +Back to ecc. +Let's give a small figure: + +byte 0: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp4 ... rp14 +byte 1: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp2 rp4 ... rp14 +byte 2: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp4 ... rp14 +byte 3: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp4 ... rp14 +byte 4: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp5 ... rp14 +.... +byte 254: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp5 ... rp15 +byte 255: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp5 ... rp15 + cp1 cp0 cp1 cp0 cp1 cp0 cp1 cp0 + cp3 cp3 cp2 cp2 cp3 cp3 cp2 cp2 + cp5 cp5 cp5 cp5 cp4 cp4 cp4 cp4 + +This figure represents a sector of 256 bytes. +cp is my abbreviaton for column parity, rp for row parity. + +Let's start to explain column parity. +cp0 is the parity that belongs to all bit0, bit2, bit4, bit6. +so the sum of all bit0, bit2, bit4 and bit6 values + cp0 itself is even. +Similarly cp1 is the sum of all bit1, bit3, bit5 and bit7. +cp2 is the parity over bit0, bit1, bit4 and bit5 +cp3 is the parity over bit2, bit3, bit6 and bit7. +cp4 is the parity over bit0, bit1, bit2 and bit3. +cp5 is the parity over bit4, bit5, bit6 and bit7. +Note that each of cp0 .. cp5 is exactly one bit. + +Row parity actually works almost the same. +rp0 is the parity of all even bytes (0, 2, 4, 6, ... 252, 254) +rp1 is the parity of all odd bytes (1, 3, 5, 7, ..., 253, 255) +rp2 is the parity of all bytes 0, 1, 4, 5, 8, 9, ... +(so handle two bytes, then skip 2 bytes). +rp3 is covers the half rp2 does not cover (bytes 2, 3, 6, 7, 10, 11, ...) +for rp4 the rule is cover 4 bytes, skip 4 bytes, cover 4 bytes, skip 4 etc. +so rp4 calculates parity over bytes 0, 1, 2, 3, 8, 9, 10, 11, 16, ...) +and rp5 covers the other half, so bytes 4, 5, 6, 7, 12, 13, 14, 15, 20, .. +The story now becomes quite boring. I guess you get the idea. 
+rp6 covers 8 bytes then skips 8 etc +rp7 skips 8 bytes then covers 8 etc +rp8 covers 16 bytes then skips 16 etc +rp9 skips 16 bytes then covers 16 etc +rp10 covers 32 bytes then skips 32 etc +rp11 skips 32 bytes then covers 32 etc +rp12 covers 64 bytes then skips 64 etc +rp13 skips 64 bytes then covers 64 etc +rp14 covers 128 bytes then skips 128 +rp15 skips 128 bytes then covers 128 + +In the end the parity bits are grouped together in three bytes as +follows: +ECC Bit 7 Bit 6 Bit 5 Bit 4 Bit 3 Bit 2 Bit 1 Bit 0 +ECC 0 rp07 rp06 rp05 rp04 rp03 rp02 rp01 rp00 +ECC 1 rp15 rp14 rp13 rp12 rp11 rp10 rp09 rp08 +ECC 2 cp5 cp4 cp3 cp2 cp1 cp0 1 1 + +I detected after writing this that ST application note AN1823 +(http://www.st.com/stonline/books/pdf/docs/10123.pdf) gives a much +nicer picture.(but they use line parity as term where I use row parity) +Oh well, I'm graphically challenged, so suffer with me for a moment :-) +And I could not reuse the ST picture anyway for copyright reasons. + + +Attempt 0 +========= + +Implementing the parity calculation is pretty simple. +In C pseudocode: +for (i = 0; i < 256; i++) +{ + if (i & 0x01) + rp1 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1; + else + rp0 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1; + if (i & 0x02) + rp3 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp3; + else + rp2 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp2; + if (i & 0x04) + rp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp5; + else + rp4 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp4; + if (i & 0x08) + rp7 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp7; + else + rp6 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp6; + if (i & 0x10) + rp9 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp9; + else + rp8 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp8; + if (i & 0x20) + rp11 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp11; + else + rp10 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp10; + if (i & 0x40) + rp13 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp13; + else + rp12 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp12; + if (i & 0x80) + rp15 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp15; + else + rp14 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp14; + cp0 = bit6 ^ bit4 ^ bit2 ^ bit0 ^ cp0; + cp1 = bit7 ^ bit5 ^ bit3 ^ bit1 ^ cp1; + cp2 = bit5 ^ bit4 ^ bit1 ^ bit0 ^ cp2; + cp3 = bit7 ^ bit6 ^ bit3 ^ bit2 ^ cp3 + cp4 = bit3 ^ bit2 ^ bit1 ^ bit0 ^ cp4 + cp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ cp5 +} + + +Analysis 0 +========== + +C does have bitwise operators but not really operators to do the above +efficiently (and most hardware has no such instructions either). +Therefore without implementing this it was clear that the code above was +not going to bring me a Nobel prize :-) + +Fortunately the exclusive or operation is commutative, so we can combine +the values in any order. So instead of calculating all the bits +individually, let us try to rearrange things. +For the column parity this is easy. We can just xor the bytes and in the +end filter out the relevant bits. This is pretty nice as it will bring +all cp calculation out of the if loop. + +Similarly we can first xor the bytes for the various rows. 
+This leads to: + + +Attempt 1 +========= + +const char parity[256] = { + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 +}; + +void ecc1(const unsigned char *buf, unsigned char *code) +{ + int i; + const unsigned char *bp = buf; + unsigned char cur; + unsigned char rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; + unsigned char rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + unsigned char par; + + par = 0; + rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0; + rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0; + rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0; + rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0; + + for (i = 0; i < 256; i++) + { + cur = *bp++; + par ^= cur; + if (i & 0x01) rp1 ^= cur; else rp0 ^= cur; + if (i & 0x02) rp3 ^= cur; else rp2 ^= cur; + if (i & 0x04) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x08) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x10) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x20) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x40) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x80) rp15 ^= cur; else rp14 ^= cur; + } + code[0] = + (parity[rp7] << 7) | + (parity[rp6] << 6) | + (parity[rp5] << 5) | + (parity[rp4] << 4) | + (parity[rp3] << 3) | + (parity[rp2] << 2) | + (parity[rp1] << 1) | + (parity[rp0]); + code[1] = + (parity[rp15] << 7) | + (parity[rp14] << 6) | + (parity[rp13] << 5) | + (parity[rp12] << 4) | + (parity[rp11] << 3) | + (parity[rp10] << 2) | + (parity[rp9] << 1) | + (parity[rp8]); + code[2] = + (parity[par & 0xf0] << 7) | + (parity[par & 0x0f] << 6) | + (parity[par & 0xcc] << 5) | + (parity[par & 0x33] << 4) | + (parity[par & 0xaa] << 3) | + (parity[par & 0x55] << 2); + code[0] = ~code[0]; + code[1] = ~code[1]; + code[2] = ~code[2]; +} + +Still pretty straightforward. The last three invert statements are there to +give a checksum of 0xff 0xff 0xff for an empty flash. In an empty flash +all data is 0xff, so the checksum then matches. + +I also introduced the parity lookup. I expected this to be the fastest +way to calculate the parity, but I will investigate alternatives later +on. + + +Analysis 1 +========== + +The code works, but is not terribly efficient. On my system it took +almost 4 times as much time as the linux driver code. But hey, if it was +*that* easy this would have been done long before. +No pain. no gain. + +Fortunately there is plenty of room for improvement. + +In step 1 we moved from bit-wise calculation to byte-wise calculation. +However in C we can also use the unsigned long data type and virtually +every modern microprocessor supports 32 bit operations, so why not try +to write our code in such a way that we process data in 32 bit chunks. + +Of course this means some modification as the row parity is byte by +byte. A quick analysis: +for the column parity we use the par variable. 
When extending to 32 bits +we can in the end easily calculate p0 and p1 from it. +(because par now consists of 4 bytes, contributing to rp1, rp0, rp1, rp0 +respectively) +also rp2 and rp3 can be easily retrieved from par as rp3 covers the +first two bytes and rp2 the last two bytes. + +Note that of course now the loop is executed only 64 times (256/4). +And note that care must taken wrt byte ordering. The way bytes are +ordered in a long is machine dependent, and might affect us. +Anyway, if there is an issue: this code is developed on x86 (to be +precise: a DELL PC with a D920 Intel CPU) + +And of course the performance might depend on alignment, but I expect +that the I/O buffers in the nand driver are aligned properly (and +otherwise that should be fixed to get maximum performance). + +Let's give it a try... + + +Attempt 2 +========= + +extern const char parity[256]; + +void ecc2(const unsigned char *buf, unsigned char *code) +{ + int i; + const unsigned long *bp = (unsigned long *)buf; + unsigned long cur; + unsigned long rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; + unsigned long rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + unsigned long par; + + par = 0; + rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0; + rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0; + rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0; + rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0; + + for (i = 0; i < 64; i++) + { + cur = *bp++; + par ^= cur; + if (i & 0x01) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x02) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x04) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x08) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x10) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x20) rp15 ^= cur; else rp14 ^= cur; + } + /* + we need to adapt the code generation for the fact that rp vars are now + long; also the column parity calculation needs to be changed. + we'll bring rp4 to 15 back to single byte entities by shifting and + xoring + */ + rp4 ^= (rp4 >> 16); rp4 ^= (rp4 >> 8); rp4 &= 0xff; + rp5 ^= (rp5 >> 16); rp5 ^= (rp5 >> 8); rp5 &= 0xff; + rp6 ^= (rp6 >> 16); rp6 ^= (rp6 >> 8); rp6 &= 0xff; + rp7 ^= (rp7 >> 16); rp7 ^= (rp7 >> 8); rp7 &= 0xff; + rp8 ^= (rp8 >> 16); rp8 ^= (rp8 >> 8); rp8 &= 0xff; + rp9 ^= (rp9 >> 16); rp9 ^= (rp9 >> 8); rp9 &= 0xff; + rp10 ^= (rp10 >> 16); rp10 ^= (rp10 >> 8); rp10 &= 0xff; + rp11 ^= (rp11 >> 16); rp11 ^= (rp11 >> 8); rp11 &= 0xff; + rp12 ^= (rp12 >> 16); rp12 ^= (rp12 >> 8); rp12 &= 0xff; + rp13 ^= (rp13 >> 16); rp13 ^= (rp13 >> 8); rp13 &= 0xff; + rp14 ^= (rp14 >> 16); rp14 ^= (rp14 >> 8); rp14 &= 0xff; + rp15 ^= (rp15 >> 16); rp15 ^= (rp15 >> 8); rp15 &= 0xff; + rp3 = (par >> 16); rp3 ^= (rp3 >> 8); rp3 &= 0xff; + rp2 = par & 0xffff; rp2 ^= (rp2 >> 8); rp2 &= 0xff; + par ^= (par >> 16); + rp1 = (par >> 8); rp1 &= 0xff; + rp0 = (par & 0xff); + par ^= (par >> 8); par &= 0xff; + + code[0] = + (parity[rp7] << 7) | + (parity[rp6] << 6) | + (parity[rp5] << 5) | + (parity[rp4] << 4) | + (parity[rp3] << 3) | + (parity[rp2] << 2) | + (parity[rp1] << 1) | + (parity[rp0]); + code[1] = + (parity[rp15] << 7) | + (parity[rp14] << 6) | + (parity[rp13] << 5) | + (parity[rp12] << 4) | + (parity[rp11] << 3) | + (parity[rp10] << 2) | + (parity[rp9] << 1) | + (parity[rp8]); + code[2] = + (parity[par & 0xf0] << 7) | + (parity[par & 0x0f] << 6) | + (parity[par & 0xcc] << 5) | + (parity[par & 0x33] << 4) | + (parity[par & 0xaa] << 3) | + (parity[par & 0x55] << 2); + code[0] = ~code[0]; + code[1] = ~code[1]; + code[2] = ~code[2]; +} + +The parity array is not shown any more. 
Note also that for these +examples I kinda deviated from my regular programming style by allowing +multiple statements on a line, not using { } in then and else blocks +with only a single statement and by using operators like ^= + + +Analysis 2 +========== + +The code (of course) works, and hurray: we are a little bit faster than +the linux driver code (about 15%). But wait, don't cheer too quickly. +THere is more to be gained. +If we look at e.g. rp14 and rp15 we see that we either xor our data with +rp14 or with rp15. However we also have par which goes over all data. +This means there is no need to calculate rp14 as it can be calculated from +rp15 through rp14 = par ^ rp15; +(or if desired we can avoid calculating rp15 and calculate it from +rp14). That is why some places refer to inverse parity. +Of course the same thing holds for rp4/5, rp6/7, rp8/9, rp10/11 and rp12/13. +Effectively this means we can eliminate the else clause from the if +statements. Also we can optimise the calculation in the end a little bit +by going from long to byte first. Actually we can even avoid the table +lookups + +Attempt 3 +========= + +Odd replaced: + if (i & 0x01) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x02) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x04) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x08) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x10) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x20) rp15 ^= cur; else rp14 ^= cur; +with + if (i & 0x01) rp5 ^= cur; + if (i & 0x02) rp7 ^= cur; + if (i & 0x04) rp9 ^= cur; + if (i & 0x08) rp11 ^= cur; + if (i & 0x10) rp13 ^= cur; + if (i & 0x20) rp15 ^= cur; + + and outside the loop added: + rp4 = par ^ rp5; + rp6 = par ^ rp7; + rp8 = par ^ rp9; + rp10 = par ^ rp11; + rp12 = par ^ rp13; + rp14 = par ^ rp15; + +And after that the code takes about 30% more time, although the number of +statements is reduced. This is also reflected in the assembly code. + + +Analysis 3 +========== + +Very weird. Guess it has to do with caching or instruction parallellism +or so. I also tried on an eeePC (Celeron, clocked at 900 Mhz). Interesting +observation was that this one is only 30% slower (according to time) +executing the code as my 3Ghz D920 processor. + +Well, it was expected not to be easy so maybe instead move to a +different track: let's move back to the code from attempt2 and do some +loop unrolling. This will eliminate a few if statements. I'll try +different amounts of unrolling to see what works best. + + +Attempt 4 +========= + +Unrolled the loop 1, 2, 3 and 4 times. +For 4 the code starts with: + + for (i = 0; i < 4; i++) + { + cur = *bp++; + par ^= cur; + rp4 ^= cur; + rp6 ^= cur; + rp8 ^= cur; + rp10 ^= cur; + if (i & 0x1) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x2) rp15 ^= cur; else rp14 ^= cur; + cur = *bp++; + par ^= cur; + rp5 ^= cur; + rp6 ^= cur; + ... + + +Analysis 4 +========== + +Unrolling once gains about 15% +Unrolling twice keeps the gain at about 15% +Unrolling three times gives a gain of 30% compared to attempt 2. +Unrolling four times gives a marginal improvement compared to unrolling +three times. + +I decided to proceed with a four time unrolled loop anyway. It was my gut +feeling that in the next steps I would obtain additional gain from it. + +The next step was triggered by the fact that par contains the xor of all +bytes and rp4 and rp5 each contain the xor of half of the bytes. +So in effect par = rp4 ^ rp5. But as xor is commutative we can also say +that rp5 = par ^ rp4. So no need to keep both rp4 and rp5 around. 
We can +eliminate rp5 (or rp4, but I already foresaw another optimisation). +The same holds for rp6/7, rp8/9, rp10/11 rp12/13 and rp14/15. + + +Attempt 5 +========= + +Effectively so all odd digit rp assignments in the loop were removed. +This included the else clause of the if statements. +Of course after the loop we need to correct things by adding code like: + rp5 = par ^ rp4; +Also the initial assignments (rp5 = 0; etc) could be removed. +Along the line I also removed the initialisation of rp0/1/2/3. + + +Analysis 5 +========== + +Measurements showed this was a good move. The run-time roughly halved +compared with attempt 4 with 4 times unrolled, and we only require 1/3rd +of the processor time compared to the current code in the linux kernel. + +However, still I thought there was more. I didn't like all the if +statements. Why not keep a running parity and only keep the last if +statement. Time for yet another version! + + +Attempt 6 +========= + +THe code within the for loop was changed to: + + for (i = 0; i < 4; i++) + { + cur = *bp++; tmppar = cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= tmppar; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp8 ^= tmppar; + + cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp10 ^= tmppar; + + cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; rp8 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; rp8 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp8 ^= cur; + cur = *bp++; tmppar ^= cur; rp8 ^= cur; + + cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; + + par ^= tmppar; + if ((i & 0x1) == 0) rp12 ^= tmppar; + if ((i & 0x2) == 0) rp14 ^= tmppar; + } + +As you can see tmppar is used to accumulate the parity within a for +iteration. In the last 3 statements is is added to par and, if needed, +to rp12 and rp14. + +While making the changes I also found that I could exploit that tmppar +contains the running parity for this iteration. So instead of having: +rp4 ^= cur; rp6 = cur; +I removed the rp6 = cur; statement and did rp6 ^= tmppar; on next +statement. A similar change was done for rp8 and rp10 + + +Analysis 6 +========== + +Measuring this code again showed big gain. When executing the original +linux code 1 million times, this took about 1 second on my system. +(using time to measure the performance). After this iteration I was back +to 0.075 sec. Actually I had to decide to start measuring over 10 +million interations in order not to loose too much accuracy. This one +definitely seemed to be the jackpot! + +There is a little bit more room for improvement though. There are three +places with statements: +rp4 ^= cur; rp6 ^= cur; +It seems more efficient to also maintain a variable rp4_6 in the while +loop; This eliminates 3 statements per loop. Of course after the loop we +need to correct by adding: + rp4 ^= rp4_6; + rp6 ^= rp4_6 +Furthermore there are 4 sequential assingments to rp8. This can be +encoded slightly more efficient by saving tmppar before those 4 lines +and later do rp8 = rp8 ^ tmppar ^ notrp8; +(where notrp8 is the value of rp8 before those 4 lines). +Again a use of the commutative property of xor. +Time for a new test! 
+ + +Attempt 7 +========= + +The new code now looks like: + + for (i = 0; i < 4; i++) + { + cur = *bp++; tmppar = cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= tmppar; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp8 ^= tmppar; + + cur = *bp++; tmppar ^= cur; rp4_6 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; rp10 ^= tmppar; + + notrp8 = tmppar; + cur = *bp++; tmppar ^= cur; rp4_6 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; + rp8 = rp8 ^ tmppar ^ notrp8; + + cur = *bp++; tmppar ^= cur; rp4_6 ^= cur; + cur = *bp++; tmppar ^= cur; rp6 ^= cur; + cur = *bp++; tmppar ^= cur; rp4 ^= cur; + cur = *bp++; tmppar ^= cur; + + par ^= tmppar; + if ((i & 0x1) == 0) rp12 ^= tmppar; + if ((i & 0x2) == 0) rp14 ^= tmppar; + } + rp4 ^= rp4_6; + rp6 ^= rp4_6; + + +Not a big change, but every penny counts :-) + + +Analysis 7 +========== + +Acutally this made things worse. Not very much, but I don't want to move +into the wrong direction. Maybe something to investigate later. Could +have to do with caching again. + +Guess that is what there is to win within the loop. Maybe unrolling one +more time will help. I'll keep the optimisations from 7 for now. + + +Attempt 8 +========= + +Unrolled the loop one more time. + + +Analysis 8 +========== + +This makes things worse. Let's stick with attempt 6 and continue from there. +Although it seems that the code within the loop cannot be optimised +further there is still room to optimize the generation of the ecc codes. +We can simply calcualate the total parity. If this is 0 then rp4 = rp5 +etc. If the parity is 1, then rp4 = !rp5; +But if rp4 = rp5 we do not need rp5 etc. We can just write the even bits +in the result byte and then do something like + code[0] |= (code[0] << 1); +Lets test this. + + +Attempt 9 +========= + +Changed the code but again this slightly degrades performance. Tried all +kind of other things, like having dedicated parity arrays to avoid the +shift after parity[rp7] << 7; No gain. +Change the lookup using the parity array by using shift operators (e.g. +replace parity[rp7] << 7 with: +rp7 ^= (rp7 << 4); +rp7 ^= (rp7 << 2); +rp7 ^= (rp7 << 1); +rp7 &= 0x80; +No gain. + +The only marginal change was inverting the parity bits, so we can remove +the last three invert statements. + +Ah well, pity this does not deliver more. Then again 10 million +iterations using the linux driver code takes between 13 and 13.5 +seconds, whereas my code now takes about 0.73 seconds for those 10 +million iterations. So basically I've improved the performance by a +factor 18 on my system. Not that bad. Of course on different hardware +you will get different results. No warranties! + +But of course there is no such thing as a free lunch. The codesize almost +tripled (from 562 bytes to 1434 bytes). Then again, it is not that much. + + +Correcting errors +================= + +For correcting errors I again used the ST application note as a starter, +but I also peeked at the existing code. +The algorithm itself is pretty straightforward. Just xor the given and +the calculated ecc. If all bytes are 0 there is no problem. If 11 bits +are 1 we have one correctable bit error. If there is 1 bit 1, we have an +error in the given ecc code. +It proved to be fastest to do some table lookups. 
Performance gain +introduced by this is about a factor 2 on my system when a repair had to +be done, and 1% or so if no repair had to be done. +Code size increased from 330 bytes to 686 bytes for this function. +(gcc 4.2, -O3) + + +Conclusion +========== + +The gain when calculating the ecc is tremendous. Om my development hardware +a speedup of a factor of 18 for ecc calculation was achieved. On a test on an +embedded system with a MIPS core a factor 7 was obtained. +On a test with a Linksys NSLU2 (ARMv5TE processor) the speedup was a factor +5 (big endian mode, gcc 4.1.2, -O3) +For correction not much gain could be obtained (as bitflips are rare). Then +again there are also much less cycles spent there. + +It seems there is not much more gain possible in this, at least when +programmed in C. Of course it might be possible to squeeze something more +out of it with an assembler program, but due to pipeline behaviour etc +this is very tricky (at least for intel hw). + +Author: Frans Meulenbroeks +Copyright (C) 2008 Koninklijke Philips Electronics NV. diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 5ce0952aa06546096fc165d9562f35565ed17025..10a0263ebb3f01e832c7827cc75d7fe54b341a6f 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt @@ -95,7 +95,9 @@ On all - write a character to /proc/sysrq-trigger. e.g.: 'p' - Will dump the current registers and flags to your console. -'q' - Will dump a list of all running timers. +'q' - Will dump per CPU lists of all armed hrtimers (but NOT regular + timer_list timers) and detailed information about all + clockevent devices. 'r' - Turns off keyboard raw mode and sets it to XLATE. diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d354e16749447c667934ecd4c8f4fe5ee3da36c --- /dev/null +++ b/Documentation/tracepoints.txt @@ -0,0 +1,101 @@ + Using the Linux Kernel Tracepoints + + Mathieu Desnoyers + + +This document introduces Linux Kernel Tracepoints and their use. It provides +examples of how to insert tracepoints in the kernel and connect probe functions +to them and provides some examples of probe functions. + + +* Purpose of tracepoints + +A tracepoint placed in code provides a hook to call a function (probe) that you +can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or +"off" (no probe is attached). When a tracepoint is "off" it has no effect, +except for adding a tiny time penalty (checking a condition for a branch) and +space penalty (adding a few bytes for the function call at the end of the +instrumented function and adds a data structure in a separate section). When a +tracepoint is "on", the function you provide is called each time the tracepoint +is executed, in the execution context of the caller. When the function provided +ends its execution, it returns to the caller (continuing from the tracepoint +site). + +You can put tracepoints at important locations in the code. They are +lightweight hooks that can pass an arbitrary number of parameters, +which prototypes are described in a tracepoint declaration placed in a header +file. + +They can be used for tracing and performance accounting. + + +* Usage + +Two elements are required for tracepoints : + +- A tracepoint definition, placed in a header file. +- The tracepoint statement, in C code. + +In order to use tracepoints, you should include linux/tracepoint.h. 
+ +In include/trace/subsys.h : + +#include + +DEFINE_TRACE(subsys_eventname, + TPPTOTO(int firstarg, struct task_struct *p), + TPARGS(firstarg, p)); + +In subsys/file.c (where the tracing statement must be added) : + +#include + +void somefct(void) +{ + ... + trace_subsys_eventname(arg, task); + ... +} + +Where : +- subsys_eventname is an identifier unique to your event + - subsys is the name of your subsystem. + - eventname is the name of the event to trace. +- TPPTOTO(int firstarg, struct task_struct *p) is the prototype of the function + called by this tracepoint. +- TPARGS(firstarg, p) are the parameters names, same as found in the prototype. + +Connecting a function (probe) to a tracepoint is done by providing a probe +(function to call) for the specific tracepoint through +register_trace_subsys_eventname(). Removing a probe is done through +unregister_trace_subsys_eventname(); it will remove the probe sure there is no +caller left using the probe when it returns. Probe removal is preempt-safe +because preemption is disabled around the probe call. See the "Probe example" +section below for a sample probe module. + +The tracepoint mechanism supports inserting multiple instances of the same +tracepoint, but a single definition must be made of a given tracepoint name over +all the kernel to make sure no type conflict will occur. Name mangling of the +tracepoints is done using the prototypes to make sure typing is correct. +Verification of probe type correctness is done at the registration site by the +compiler. Tracepoints can be put in inline functions, inlined static functions, +and unrolled loops as well as regular functions. + +The naming scheme "subsys_event" is suggested here as a convention intended +to limit collisions. Tracepoint names are global to the kernel: they are +considered as being the same whether they are in the core kernel image or in +modules. + + +* Probe / tracepoint example + +See the example provided in samples/tracepoints/src + +Compile them with your kernel. + +Run, as root : +modprobe tracepoint-example (insmod order is not important) +modprobe tracepoint-probe-example +cat /proc/tracepoint-example (returns an expected error) +rmmod tracepoint-example tracepoint-probe-example +dmesg diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt index a4afb560a45bfa9c7429f18d54d0f8b4c194b116..5bbbe2096223f69bc1f0d7007f6d07a3385e4fd4 100644 --- a/Documentation/tracers/mmiotrace.txt +++ b/Documentation/tracers/mmiotrace.txt @@ -36,7 +36,7 @@ $ mount -t debugfs debugfs /debug $ echo mmiotrace > /debug/tracing/current_tracer $ cat /debug/tracing/trace_pipe > mydump.txt & Start X or whatever. -$ echo "X is up" > /debug/tracing/marker +$ echo "X is up" > /debug/tracing/trace_marker $ echo none > /debug/tracing/current_tracer Check for lost events. @@ -59,9 +59,8 @@ The 'cat' process should stay running (sleeping) in the background. Load the driver you want to trace and use it. Mmiotrace will only catch MMIO accesses to areas that are ioremapped while mmiotrace is active. -[Unimplemented feature:] During tracing you can place comments (markers) into the trace by -$ echo "X is up" > /debug/tracing/marker +$ echo "X is up" > /debug/tracing/trace_marker This makes it easier to see which part of the (huge) trace corresponds to which action. It is recommended to place descriptive markers about what you do. 
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt new file mode 100644 index 0000000000000000000000000000000000000000..125eed560e5a1bfb25d08ee7ad903d3efd8faef4 --- /dev/null +++ b/Documentation/vm/unevictable-lru.txt @@ -0,0 +1,615 @@ + +This document describes the Linux memory management "Unevictable LRU" +infrastructure and the use of this infrastructure to manage several types +of "unevictable" pages. The document attempts to provide the overall +rationale behind this mechanism and the rationale for some of the design +decisions that drove the implementation. The latter design rationale is +discussed in the context of an implementation description. Admittedly, one +can obtain the implementation details--the "what does it do?"--by reading the +code. One hopes that the descriptions below add value by provide the answer +to "why does it do that?". + +Unevictable LRU Infrastructure: + +The Unevictable LRU adds an additional LRU list to track unevictable pages +and to hide these pages from vmscan. This mechanism is based on a patch by +Larry Woodman of Red Hat to address several scalability problems with page +reclaim in Linux. The problems have been observed at customer sites on large +memory x86_64 systems. For example, a non-numal x86_64 platform with 128GB +of main memory will have over 32 million 4k pages in a single zone. When a +large fraction of these pages are not evictable for any reason [see below], +vmscan will spend a lot of time scanning the LRU lists looking for the small +fraction of pages that are evictable. This can result in a situation where +all cpus are spending 100% of their time in vmscan for hours or days on end, +with the system completely unresponsive. + +The Unevictable LRU infrastructure addresses the following classes of +unevictable pages: + ++ page owned by ramfs ++ page mapped into SHM_LOCKed shared memory regions ++ page mapped into VM_LOCKED [mlock()ed] vmas + +The infrastructure might be able to handle other conditions that make pages +unevictable, either by definition or by circumstance, in the future. + + +The Unevictable LRU List + +The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list +called the "unevictable" list and an associated page flag, PG_unevictable, to +indicate that the page is being managed on the unevictable list. The +PG_unevictable flag is analogous to, and mutually exclusive with, the PG_active +flag in that it indicates on which LRU list a page resides when PG_lru is set. +The unevictable LRU list is source configurable based on the UNEVICTABLE_LRU +Kconfig option. + +The Unevictable LRU infrastructure maintains unevictable pages on an additional +LRU list for a few reasons: + +1) We get to "treat unevictable pages just like we treat other pages in the + system, which means we get to use the same code to manipulate them, the + same code to isolate them (for migrate, etc.), the same code to keep track + of the statistics, etc..." [Rik van Riel] + +2) We want to be able to migrate unevictable pages between nodes--for memory + defragmentation, workload management and memory hotplug. The linux kernel + can only migrate pages that it can successfully isolate from the lru lists. + If we were to maintain pages elsewise than on an lru-like list, where they + can be found by isolate_lru_page(), we would prevent their migration, unless + we reworked migration code to find the unevictable pages. 
+ + +The unevictable LRU list does not differentiate between file backed and swap +backed [anon] pages. This differentiation is only important while the pages +are, in fact, evictable. + +The unevictable LRU list benefits from the "arrayification" of the per-zone +LRU lists and statistics originally proposed and posted by Christoph Lameter. + +The unevictable list does not use the lru pagevec mechanism. Rather, +unevictable pages are placed directly on the page's zone's unevictable +list under the zone lru_lock. The reason for this is to prevent stranding +of pages on the unevictable list when one task has the page isolated from the +lru and other tasks are changing the "evictability" state of the page. + + +Unevictable LRU and Memory Controller Interaction + +The memory controller data structure automatically gets a per zone unevictable +lru list as a result of the "arrayification" of the per-zone LRU lists. The +memory controller tracks the movement of pages to and from the unevictable list. +When a memory control group comes under memory pressure, the controller will +not attempt to reclaim pages on the unevictable list. This has a couple of +effects. Because the pages are "hidden" from reclaim on the unevictable list, +the reclaim process can be more efficient, dealing only with pages that have +a chance of being reclaimed. On the other hand, if too many of the pages +charged to the control group are unevictable, the evictable portion of the +working set of the tasks in the control group may not fit into the available +memory. This can cause the control group to thrash or to oom-kill tasks. + + +Unevictable LRU: Detecting Unevictable Pages + +The function page_evictable(page, vma) in vmscan.c determines whether a +page is evictable or not. For ramfs pages and pages in SHM_LOCKed regions, +page_evictable() tests a new address space flag, AS_UNEVICTABLE, in the page's +address space using a wrapper function. Wrapper functions are used to set, +clear and test the flag to reduce the requirement for #ifdef's throughout the +source code. AS_UNEVICTABLE is set on ramfs inode/mapping when it is created. +This flag remains for the life of the inode. + +For shared memory regions, AS_UNEVICTABLE is set when an application +successfully SHM_LOCKs the region and is removed when the region is +SHM_UNLOCKed. Note that shmctl(SHM_LOCK, ...) does not populate the page +tables for the region as does, for example, mlock(). So, we make no special +effort to push any pages in the SHM_LOCKed region to the unevictable list. +Vmscan will do this when/if it encounters the pages during reclaim. On +SHM_UNLOCK, shmctl() scans the pages in the region and "rescues" them from the +unevictable list if no other condition keeps them unevictable. If a SHM_LOCKed +region is destroyed, the pages are also "rescued" from the unevictable list in +the process of freeing them. + +page_evictable() detects mlock()ed pages by testing an additional page flag, +PG_mlocked via the PageMlocked() wrapper. If the page is NOT mlocked, and a +non-NULL vma is supplied, page_evictable() will check whether the vma is +VM_LOCKED via is_mlocked_vma(). is_mlocked_vma() will SetPageMlocked() and +update the appropriate statistics if the vma is VM_LOCKED. This method allows +efficient "culling" of pages in the fault path that are being faulted in to +VM_LOCKED vmas. 
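The checks described above can be summarized with the following sketch. The
real page_evictable() lives in vmscan.c; mapping_unevictable() stands in for
the wrapper that tests AS_UNEVICTABLE, and is_mlocked_vma() is the mm-internal
helper named in the text.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Simplified sketch of page_evictable(); details and locking omitted. */
static int page_evictable_sketch(struct page *page, struct vm_area_struct *vma)
{
	/* ramfs inodes and SHM_LOCKed segments mark the whole mapping */
	if (mapping_unevictable(page_mapping(page)))
		return 0;

	/* already-noticed mlocked pages, or a fault into a VM_LOCKED vma */
	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}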
+ + +Unevictable Pages and Vmscan [shrink_*_list()] + +If unevictable pages are culled in the fault path, or moved to the unevictable +list at mlock() or mmap() time, vmscan will never encounter the pages until +they have become evictable again, for example, via munlock() and have been +"rescued" from the unevictable list. However, there may be situations where we +decide, for the sake of expediency, to leave a unevictable page on one of the +regular active/inactive LRU lists for vmscan to deal with. Vmscan checks for +such pages in all of the shrink_{active|inactive|page}_list() functions and +will "cull" such pages that it encounters--that is, it diverts those pages to +the unevictable list for the zone being scanned. + +There may be situations where a page is mapped into a VM_LOCKED vma, but the +page is not marked as PageMlocked. Such pages will make it all the way to +shrink_page_list() where they will be detected when vmscan walks the reverse +map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK, shrink_page_list() +will cull the page at that point. + +Note that for anonymous pages, shrink_page_list() attempts to add the page to +the swap cache before it tries to unmap the page. To avoid this unnecessary +consumption of swap space, shrink_page_list() calls try_to_munlock() to check +whether any VM_LOCKED vmas map the page without attempting to unmap the page. +If try_to_munlock() returns SWAP_MLOCK, shrink_page_list() will cull the page +without consuming swap space. try_to_munlock() will be described below. + +To "cull" an unevictable page, vmscan simply puts the page back on the lru +list using putback_lru_page()--the inverse operation to isolate_lru_page()-- +after dropping the page lock. Because the condition which makes the page +unevictable may change once the page is unlocked, putback_lru_page() will +recheck the unevictable state of a page that it places on the unevictable lru +list. If the page has become unevictable, putback_lru_page() removes it from +the list and retries, including the page_unevictable() test. Because such a +race is a rare event and movement of pages onto the unevictable list should be +rare, these extra evictabilty checks should not occur in the majority of calls +to putback_lru_page(). + + +Mlocked Page: Prior Work + +The "Unevictable Mlocked Pages" infrastructure is based on work originally +posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU". +Nick posted his patch as an alternative to a patch posted by Christoph +Lameter to achieve the same objective--hiding mlocked pages from vmscan. +In Nick's patch, he used one of the struct page lru list link fields as a count +of VM_LOCKED vmas that map the page. This use of the link field for a count +prevented the management of the pages on an LRU list. Thus, mlocked pages were +not migratable as isolate_lru_page() could not find them and the lru list link +field was not available to the migration subsystem. Nick resolved this by +putting mlocked pages back on the lru list before attempting to isolate them, +thus abandoning the count of VM_LOCKED vmas. When Nick's patch was integrated +with the Unevictable LRU work, the count was replaced by walking the reverse +map to determine whether any VM_LOCKED vmas mapped the page. More on this +below. + + +Mlocked Pages: Basic Management + +Mlocked pages--pages mapped into a VM_LOCKED vma--represent one class of +unevictable pages. 
When such a page has been "noticed" by the memory +management subsystem, the page is marked with the PG_mlocked [PageMlocked()] +flag. A PageMlocked() page will be placed on the unevictable LRU list when +it is added to the LRU. Pages can be "noticed" by memory management in +several places: + +1) in the mlock()/mlockall() system call handlers. +2) in the mmap() system call handler when mmap()ing a region with the + MAP_LOCKED flag, or mmap()ing a region in a task that has called + mlockall() with the MCL_FUTURE flag. Both of these conditions result + in the VM_LOCKED flag being set for the vma. +3) in the fault path, if mlocked pages are "culled" in the fault path, + and when a VM_LOCKED stack segment is expanded. +4) as mentioned above, in vmscan:shrink_page_list() with attempting to + reclaim a page in a VM_LOCKED vma--via try_to_unmap() or try_to_munlock(). + +Mlocked pages become unlocked and rescued from the unevictable list when: + +1) mapped in a range unlocked via the munlock()/munlockall() system calls. +2) munmapped() out of the last VM_LOCKED vma that maps the page, including + unmapping at task exit. +3) when the page is truncated from the last VM_LOCKED vma of an mmap()ed file. +4) before a page is COWed in a VM_LOCKED vma. + + +Mlocked Pages: mlock()/mlockall() System Call Handling + +Both [do_]mlock() and [do_]mlockall() system call handlers call mlock_fixup() +for each vma in the range specified by the call. In the case of mlockall(), +this is the entire active address space of the task. Note that mlock_fixup() +is used for both mlock()ing and munlock()ing a range of memory. A call to +mlock() an already VM_LOCKED vma, or to munlock() a vma that is not VM_LOCKED +is treated as a no-op--mlock_fixup() simply returns. + +If the vma passes some filtering described in "Mlocked Pages: Filtering Vmas" +below, mlock_fixup() will attempt to merge the vma with its neighbors or split +off a subset of the vma if the range does not cover the entire vma. Once the +vma has been merged or split or neither, mlock_fixup() will call +__mlock_vma_pages_range() to fault in the pages via get_user_pages() and +to mark the pages as mlocked via mlock_vma_page(). + +Note that the vma being mlocked might be mapped with PROT_NONE. In this case, +get_user_pages() will be unable to fault in the pages. That's OK. If pages +do end up getting faulted into this VM_LOCKED vma, we'll handle them in the +fault path or in vmscan. + +Also note that a page returned by get_user_pages() could be truncated or +migrated out from under us, while we're trying to mlock it. To detect +this, __mlock_vma_pages_range() tests the page_mapping after acquiring +the page lock. If the page is still associated with its mapping, we'll +go ahead and call mlock_vma_page(). If the mapping is gone, we just +unlock the page and move on. Worse case, this results in page mapped +in a VM_LOCKED vma remaining on a normal LRU list without being +PageMlocked(). Again, vmscan will detect and cull such pages. + +mlock_vma_page(), called with the page locked [N.B., not "mlocked"], will +TestSetPageMlocked() for each page returned by get_user_pages(). We use +TestSetPageMlocked() because the page might already be mlocked by another +task/vma and we don't want to do extra work. We especially do not want to +count an mlocked page more than once in the statistics. If the page was +already mlocked, mlock_vma_page() is done. 
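In code, the per-page handling just described looks roughly like the sketch
below, as it would sit inside mm/mlock.c next to __mlock_vma_pages_range();
the get_user_pages() call itself and error handling are omitted, and the
function name is illustrative.

/* Sketch: handle the pages returned by get_user_pages(). */
static void mlock_returned_pages_sketch(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		lock_page(page);
		/* the page may have been truncated or migrated while we
		 * did not hold its lock; only mlock it if it is still
		 * attached to its mapping */
		if (page_mapping(page))
			mlock_vma_page(page);
		unlock_page(page);
		put_page(page);		/* drop the get_user_pages() reference */
	}
}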
+ +If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the +page from the LRU, as it is likely on the appropriate active or inactive list +at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will +putback the page--putback_lru_page()--which will notice that the page is now +mlocked and divert the page to the zone's unevictable LRU list. If +mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle +it later if/when it attempts to reclaim the page. + + +Mlocked Pages: Filtering Special Vmas + +mlock_fixup() filters several classes of "special" vmas: + +1) vmas with VM_IO|VM_PFNMAP set are skipped entirely. The pages behind + these mappings are inherently pinned, so we don't need to mark them as + mlocked. In any case, most of the pages have no struct page in which to + so mark the page. Because of this, get_user_pages() will fail for these + vmas, so there is no sense in attempting to visit them. + +2) vmas mapping hugetlbfs page are already effectively pinned into memory. + We don't need nor want to mlock() these pages. However, to preserve the + prior behavior of mlock()--before the unevictable/mlock changes--mlock_fixup() + will call make_pages_present() in the hugetlbfs vma range to allocate the + huge pages and populate the ptes. + +3) vmas with VM_DONTEXPAND|VM_RESERVED are generally user space mappings of + kernel pages, such as the vdso page, relay channel pages, etc. These pages + are inherently unevictable and are not managed on the LRU lists. + mlock_fixup() treats these vmas the same as hugetlbfs vmas. It calls + make_pages_present() to populate the ptes. + +Note that for all of these special vmas, mlock_fixup() does not set the +VM_LOCKED flag. Therefore, we won't have to deal with them later during +munlock() or munmap()--for example, at task exit. Neither does mlock_fixup() +account these vmas against the task's "locked_vm". + +Mlocked Pages: Downgrading the Mmap Semaphore. + +mlock_fixup() must be called with the mmap semaphore held for write, because +it may have to merge or split vmas. However, mlocking a large region of +memory can take a long time--especially if vmscan must reclaim pages to +satisfy the regions requirements. Faulting in a large region with the mmap +semaphore held for write can hold off other faults on the address space, in +the case of a multi-threaded task. It can also hold off scans of the task's +address space via /proc. While testing under heavy load, it was observed that +the ps(1) command could be held off for many minutes while a large segment was +mlock()ed down. + +To address this issue, and to make the system more responsive during mlock()ing +of large segments, mlock_fixup() downgrades the mmap semaphore to read mode +during the call to __mlock_vma_pages_range(). This works fine. However, the +callers of mlock_fixup() expect the semaphore to be returned in write mode. +So, mlock_fixup() "upgrades" the semphore to write mode. Linux does not +support an atomic upgrade_sem() call, so mlock_fixup() must drop the semaphore +and reacquire it in write mode. In a multi-threaded task, it is possible for +the task memory map to change while the semaphore is dropped. Therefore, +mlock_fixup() looks up the vma at the range start address after reacquiring +the semaphore in write mode and verifies that it still covers the original +range. If not, mlock_fixup() returns an error [-EAGAIN]. All callers of +mlock_fixup() have been changed to deal with this new error condition. 
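The downgrade/reacquire dance described above is sketched below. This shows the
shape of the mlock_fixup() flow, not its exact code; the
__mlock_vma_pages_range() argument list is abbreviated and assumed.

/* Sketch of the semaphore handling around the long-running fault-in. */
static long mlock_range_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	long ret;

	/* fault the pages in without blocking readers of the address space */
	downgrade_write(&mm->mmap_sem);
	ret = __mlock_vma_pages_range(vma, start, end, 1);

	/* "upgrade": drop the read lock, retake it for write, and verify
	 * that the vma still covers the original range */
	up_read(&mm->mmap_sem);
	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		return -EAGAIN;

	return ret;
}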
+ +Note: when munlocking a region, all of the pages should already be resident-- +unless we have racing threads mlocking() and munlocking() regions. So, +unlocking should not have to wait for page allocations nor faults of any kind. +Therefore mlock_fixup() does not downgrade the semaphore for munlock(). + + +Mlocked Pages: munlock()/munlockall() System Call Handling + +The munlock() and munlockall() system calls are handled by the same functions-- +do_mlock[all]()--as the mlock() and mlockall() system calls with the unlock +vs lock operation indicated by an argument. So, these system calls are also +handled by mlock_fixup(). Again, if called for an already munlock()ed vma, +mlock_fixup() simply returns. Because of the vma filtering discussed above, +VM_LOCKED will not be set in any "special" vmas. So, these vmas will be +ignored for munlock. + +If the vma is VM_LOCKED, mlock_fixup() again attempts to merge or split off +the specified range. The range is then munlocked via the function +__mlock_vma_pages_range()--the same function used to mlock a vma range-- +passing a flag to indicate that munlock() is being performed. + +Because the vma access protections could have been changed to PROT_NONE after +faulting in and mlocking some pages, get_user_pages() was unreliable for visiting +these pages for munlocking. Because we don't want to leave pages mlocked(), +get_user_pages() was enhanced to accept a flag to ignore the permissions when +fetching the pages--all of which should be resident as a result of previous +mlock()ing. + +For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling +munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked +flag using TestClearPageMlocked(). As with mlock_vma_page(), munlock_vma_page() +use the Test*PageMlocked() function to handle the case where the page might +have already been unlocked by another task. If the page was mlocked, +munlock_vma_page() updates that zone statistics for the number of mlocked +pages. Note, however, that at this point we haven't checked whether the page +is mapped by other VM_LOCKED vmas. + +We can't call try_to_munlock(), the function that walks the reverse map to check +for other VM_LOCKED vmas, without first isolating the page from the LRU. +try_to_munlock() is a variant of try_to_unmap() and thus requires that the page +not be on an lru list. [More on these below.] However, the call to +isolate_lru_page() could fail, in which case we couldn't try_to_munlock(). +So, we go ahead and clear PG_mlocked up front, as this might be the only chance +we have. If we can successfully isolate the page, we go ahead and +try_to_munlock(), which will restore the PG_mlocked flag and update the zone +page statistics if it finds another vma holding the page mlocked. If we fail +to isolate the page, we'll have left a potentially mlocked page on the LRU. +This is fine, because we'll catch it later when/if vmscan tries to reclaim the +page. This should be relatively rare. + +Mlocked Pages: Migrating Them... + +A page that is being migrated has been isolated from the lru lists and is +held locked across unmapping of the page, updating the page's mapping +[address_space] entry and copying the contents and state, until the +page table entry has been replaced with an entry that refers to the new +page. Linux supports migration of mlocked pages and other unevictable +pages. This involves simply moving the PageMlocked and PageUnevictable states +from the old page to the new page. 
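The state transfer mentioned in the previous paragraph can be sketched as
follows; the accompanying mlocked-page statistics update is omitted and the
function name is illustrative.

/* Sketch: move the mlock/unevictable state to the replacement page. */
static void migrate_lru_state_sketch(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page))
		SetPageMlocked(newpage);
	if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
}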
+ +Note that page migration can race with mlocking or munlocking of the same +page. This has been discussed from the mlock/munlock perspective in the +respective sections above. Both processes [migration, m[un]locking], hold +the page locked. This provides the first level of synchronization. Page +migration zeros out the page_mapping of the old page before unlocking it, +so m[un]lock can skip these pages by testing the page mapping under page +lock. + +When completing page migration, we place the new and old pages back onto the +lru after dropping the page lock. The "unneeded" page--old page on success, +new page on failure--will be freed when the reference count held by the +migration process is released. To ensure that we don't strand pages on the +unevictable list because of a race between munlock and migration, page +migration uses the putback_lru_page() function to add migrated pages back to +the lru. + + +Mlocked Pages: mmap(MAP_LOCKED) System Call Handling + +In addition the the mlock()/mlockall() system calls, an application can request +that a region of memory be mlocked using the MAP_LOCKED flag with the mmap() +call. Furthermore, any mmap() call or brk() call that expands the heap by a +task that has previously called mlockall() with the MCL_FUTURE flag will result +in the newly mapped memory being mlocked. Before the unevictable/mlock changes, +the kernel simply called make_pages_present() to allocate pages and populate +the page table. + +To mlock a range of memory under the unevictable/mlock infrastructure, the +mmap() handler and task address space expansion functions call +mlock_vma_pages_range() specifying the vma and the address range to mlock. +mlock_vma_pages_range() filters vmas like mlock_fixup(), as described above in +"Mlocked Pages: Filtering Vmas". It will clear the VM_LOCKED flag, which will +have already been set by the caller, in filtered vmas. Thus these vma's need +not be visited for munlock when the region is unmapped. + +For "normal" vmas, mlock_vma_pages_range() calls __mlock_vma_pages_range() to +fault/allocate the pages and mlock them. Again, like mlock_fixup(), +mlock_vma_pages_range() downgrades the mmap semaphore to read mode before +attempting to fault/allocate and mlock the pages; and "upgrades" the semaphore +back to write mode before returning. + +The callers of mlock_vma_pages_range() will have already added the memory +range to be mlocked to the task's "locked_vm". To account for filtered vmas, +mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the +callers then subtract a non-negative return value from the task's locked_vm. +A negative return value represent an error--for example, from get_user_pages() +attempting to fault in a vma with PROT_NONE access. In this case, we leave +the memory range accounted as locked_vm, as the protections could be changed +later and pages allocated into that region. + + +Mlocked Pages: munmap()/exit()/exec() System Call Handling + +When unmapping an mlocked region of memory, whether by an explicit call to +munmap() or via an internal unmap from exit() or exec() processing, we must +munlock the pages if we're removing the last VM_LOCKED vma that maps the pages. +Before the unevictable/mlock changes, mlocking did not mark the pages in any way, +so unmapping them required no processing. + +To munlock a range of memory under the unevictable/mlock infrastructure, the +munmap() hander and task address space tear down function call +munlock_vma_pages_all(). 
The name reflects the observation that one always +specifies the entire vma range when munlock()ing during unmap of a region. +Because of the vma filtering when mlocking() regions, only "normal" vmas that +actually contain mlocked pages will be passed to munlock_vma_pages_all(). + +munlock_vma_pages_all() clears the VM_LOCKED vma flag and, like mlock_fixup() +for the munlock case, calls __munlock_vma_pages_range() to walk the page table +for the vma's memory range and munlock_vma_page() each resident page mapped by +the vma. This effectively munlocks the page, only if this is the last +VM_LOCKED vma that maps the page. + + +Mlocked Page: try_to_unmap() + +[Note: the code changes represented by this section are really quite small +compared to the text to describe what happening and why, and to discuss the +implications.] + +Pages can, of course, be mapped into multiple vmas. Some of these vmas may +have VM_LOCKED flag set. It is possible for a page mapped into one or more +VM_LOCKED vmas not to have the PG_mlocked flag set and therefore reside on one +of the active or inactive LRU lists. This could happen if, for example, a +task in the process of munlock()ing the page could not isolate the page from +the LRU. As a result, vmscan/shrink_page_list() might encounter such a page +as described in "Unevictable Pages and Vmscan [shrink_*_list()]". To +handle this situation, try_to_unmap() has been enhanced to check for VM_LOCKED +vmas while it is walking a page's reverse map. + +try_to_unmap() is always called, by either vmscan for reclaim or for page +migration, with the argument page locked and isolated from the LRU. BUG_ON() +assertions enforce this requirement. Separate functions handle anonymous and +mapped file pages, as these types of pages have different reverse map +mechanisms. + + try_to_unmap_anon() + +To unmap anonymous pages, each vma in the list anchored in the anon_vma must be +visited--at least until a VM_LOCKED vma is encountered. If the page is being +unmapped for migration, VM_LOCKED vmas do not stop the process because mlocked +pages are migratable. However, for reclaim, if the page is mapped into a +VM_LOCKED vma, the scan stops. try_to_unmap() attempts to acquire the mmap +semphore of the mm_struct to which the vma belongs in read mode. If this is +successful, try_to_unmap() will mlock the page via mlock_vma_page()--we +wouldn't have gotten to try_to_unmap() if the page were already mlocked--and +will return SWAP_MLOCK, indicating that the page is unevictable. If the +mmap semaphore cannot be acquired, we are not sure whether the page is really +unevictable or not. In this case, try_to_unmap() will return SWAP_AGAIN. + + try_to_unmap_file() -- linear mappings + +Unmapping of a mapped file page works the same, except that the scan visits +all vmas that maps the page's index/page offset in the page's mapping's +reverse map priority search tree. It must also visit each vma in the page's +mapping's non-linear list, if the list is non-empty. As for anonymous pages, +on encountering a VM_LOCKED vma for a mapped file page, try_to_unmap() will +attempt to acquire the associated mm_struct's mmap semaphore to mlock the page, +returning SWAP_MLOCK if this is successful, and SWAP_AGAIN, if not. + + try_to_unmap_file() -- non-linear mappings + +If a page's mapping contains a non-empty non-linear mapping vma list, then +try_to_un{map|lock}() must also visit each vma in that list to determine +whether the page is mapped in a VM_LOCKED vma. 
Again, the scan must visit +all vmas in the non-linear list to ensure that the pages is not/should not be +mlocked. If a VM_LOCKED vma is found in the list, the scan could terminate. +However, there is no easy way to determine whether the page is actually mapped +in a given vma--either for unmapping or testing whether the VM_LOCKED vma +actually pins the page. + +So, try_to_unmap_file() handles non-linear mappings by scanning a certain +number of pages--a "cluster"--in each non-linear vma associated with the page's +mapping, for each file mapped page that vmscan tries to unmap. If this happens +to unmap the page we're trying to unmap, try_to_unmap() will notice this on +return--(page_mapcount(page) == 0)--and return SWAP_SUCCESS. Otherwise, it +will return SWAP_AGAIN, causing vmscan to recirculate this page. We take +advantage of the cluster scan in try_to_unmap_cluster() as follows: + +For each non-linear vma, try_to_unmap_cluster() attempts to acquire the mmap +semaphore of the associated mm_struct for read without blocking. If this +attempt is successful and the vma is VM_LOCKED, try_to_unmap_cluster() will +retain the mmap semaphore for the scan; otherwise it drops it here. Then, +for each page in the cluster, if we're holding the mmap semaphore for a locked +vma, try_to_unmap_cluster() calls mlock_vma_page() to mlock the page. This +call is a no-op if the page is already locked, but will mlock any pages in +the non-linear mapping that happen to be unlocked. If one of the pages so +mlocked is the page passed in to try_to_unmap(), try_to_unmap_cluster() will +return SWAP_MLOCK, rather than the default SWAP_AGAIN. This will allow vmscan +to cull the page, rather than recirculating it on the inactive list. Again, +if try_to_unmap_cluster() cannot acquire the vma's mmap sem, it returns +SWAP_AGAIN, indicating that the page is mapped by a VM_LOCKED vma, but +couldn't be mlocked. + + +Mlocked pages: try_to_munlock() Reverse Map Scan + +TODO/FIXME: a better name might be page_mlocked()--analogous to the +page_referenced() reverse map walker--especially if we continue to call this +from shrink_page_list(). See related TODO/FIXME below. + +When munlock_vma_page()--see "Mlocked Pages: munlock()/munlockall() System +Call Handling" above--tries to munlock a page, or when shrink_page_list() +encounters an anonymous page that is not yet in the swap cache, they need to +determine whether or not the page is mapped by any VM_LOCKED vma, without +actually attempting to unmap all ptes from the page. For this purpose, the +unevictable/mlock infrastructure introduced a variant of try_to_unmap() called +try_to_munlock(). + +try_to_munlock() calls the same functions as try_to_unmap() for anonymous and +mapped file pages with an additional argument specifing unlock versus unmap +processing. Again, these functions walk the respective reverse maps looking +for VM_LOCKED vmas. When such a vma is found for anonymous pages and file +pages mapped in linear VMAs, as in the try_to_unmap() case, the functions +attempt to acquire the associated mmap semphore, mlock the page via +mlock_vma_page() and return SWAP_MLOCK. This effectively undoes the +pre-clearing of the page's PG_mlocked done by munlock_vma_page() and informs +shrink_page_list() that the anonymous page should be culled rather than added +to the swap cache in preparation for a try_to_unmap() that will almost +certainly fail. + +If try_to_unmap() is unable to acquire a VM_LOCKED vma's associated mmap +semaphore, it will return SWAP_AGAIN. 
This will allow shrink_page_list() +to recycle the page on the inactive list and hope that it has better luck +with the page next time. + +For file pages mapped into non-linear vmas, the try_to_munlock() logic works +slightly differently. On encountering a VM_LOCKED non-linear vma that might +map the page, try_to_munlock() returns SWAP_AGAIN without actually mlocking +the page. munlock_vma_page() will just leave the page unlocked and let +vmscan deal with it--the usual fallback position. + +Note that try_to_munlock()'s reverse map walk must visit every vma in a pages' +reverse map to determine that a page is NOT mapped into any VM_LOCKED vma. +However, the scan can terminate when it encounters a VM_LOCKED vma and can +successfully acquire the vma's mmap semphore for read and mlock the page. +Although try_to_munlock() can be called many [very many!] times when +munlock()ing a large region or tearing down a large address space that has been +mlocked via mlockall(), overall this is a fairly rare event. In addition, +although shrink_page_list() calls try_to_munlock() for every anonymous page that +it handles that is not yet in the swap cache, on average anonymous pages will +have very short reverse map lists. + +Mlocked Page: Page Reclaim in shrink_*_list() + +shrink_active_list() culls any obviously unevictable pages--i.e., +!page_evictable(page, NULL)--diverting these to the unevictable lru +list. However, shrink_active_list() only sees unevictable pages that +made it onto the active/inactive lru lists. Note that these pages do not +have PageUnevictable set--otherwise, they would be on the unevictable list and +shrink_active_list would never see them. + +Some examples of these unevictable pages on the LRU lists are: + +1) ramfs pages that have been placed on the lru lists when first allocated. + +2) SHM_LOCKed shared memory pages. shmctl(SHM_LOCK) does not attempt to + allocate or fault in the pages in the shared memory region. This happens + when an application accesses the page the first time after SHM_LOCKing + the segment. + +3) Mlocked pages that could not be isolated from the lru and moved to the + unevictable list in mlock_vma_page(). + +3) Pages mapped into multiple VM_LOCKED vmas, but try_to_munlock() couldn't + acquire the vma's mmap semaphore to test the flags and set PageMlocked. + munlock_vma_page() was forced to let the page back on to the normal + LRU list for vmscan to handle. + +shrink_inactive_list() also culls any unevictable pages that it finds +on the inactive lists, again diverting them to the appropriate zone's unevictable +lru list. shrink_inactive_list() should only see SHM_LOCKed pages that became +SHM_LOCKed after shrink_active_list() had moved them to the inactive list, or +pages mapped into VM_LOCKED vmas that munlock_vma_page() couldn't isolate from +the lru to recheck via try_to_munlock(). shrink_inactive_list() won't notice +the latter, but will pass on to shrink_page_list(). + +shrink_page_list() again culls obviously unevictable pages that it could +encounter for similar reason to shrink_inactive_list(). As already discussed, +shrink_page_list() proactively looks for anonymous pages that should have +PG_mlocked set but don't--these would not be detected by page_evictable()--to +avoid adding them to the swap cache unnecessarily. File pages mapped into +VM_LOCKED vmas but without PG_mlocked set will make it all the way to +try_to_unmap(). shrink_page_list() will divert them to the unevictable list when +try_to_unmap() returns SWAP_MLOCK, as discussed above. 
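The cull itself is the same small step in each of the shrink_*_list()
functions and can be sketched as below; the page has already been isolated
from its LRU list, and locking and statistics are omitted.

/* Sketch: divert an isolated page that turns out to be unevictable. */
static int cull_unevictable_sketch(struct page *page)
{
	if (likely(page_evictable(page, NULL)))
		return 0;		/* proceed with normal reclaim */
	putback_lru_page(page);		/* lands on the unevictable list */
	return 1;			/* culled */
}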
+ +TODO/FIXME: If we can enhance the swap cache to reliably remove entries +with page_count(page) > 2, as long as all ptes are mapped to the page and +not the swap entry, we can probably remove the call to try_to_munlock() in +shrink_page_list() and just remove the page from the swap cache when +try_to_unmap() returns SWAP_MLOCK. Currently, remove_exclusive_swap_page() +doesn't seem to allow that. + + diff --git a/MAINTAINERS b/MAINTAINERS index 355c192d699779f773259ddd647b64d574021cfe..5c3f79c26384eba384af07241c71f2cb251abdb5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1198,7 +1198,7 @@ S: Maintained CPU FREQUENCY DRIVERS P: Dave Jones -M: davej@codemonkey.org.uk +M: davej@redhat.com L: cpufreq@vger.kernel.org W: http://www.codemonkey.org.uk/projects/cpufreq/ T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index a0f642b6a4b92b556e08c8c1d1e357979cd48322..6110197757a3f6283bf999519f1e7649fb96a867 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -70,6 +70,7 @@ config AUTO_IRQ_AFFINITY default y source "init/Kconfig" +source "kernel/Kconfig.freezer" menu "System setup" diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 15fda434442428660e3d896661ec11c9f134e71e..d069526bd7673f9ead6eb33e825782ea690e2fb0 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -74,12 +74,14 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TIF_UAC_SIGBUS 7 #define TIF_MEMDIE 8 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ +#define TIF_FREEZE 16 /* is freezing for suspend */ #define _TIF_SYSCALL_TRACE (1<= 70 */ @@ -525,7 +525,7 @@ set_rtc_mmss(unsigned long nowtime) cmos_minutes = CMOS_READ(RTC_MINUTES); if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - BCD_TO_BIN(cmos_minutes); + cmos_minutes = bcd2bin(cmos_minutes); /* * since we're only adjusting minutes and seconds, @@ -543,8 +543,8 @@ set_rtc_mmss(unsigned long nowtime) if (abs(real_minutes - cmos_minutes) < 30) { if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); + real_seconds = bin2bcd(real_seconds); + real_minutes = bin2bcd(real_minutes); } CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6c73e963d976a017cfa50e6da27a66899fcfa99e..6ea097f0890fd7f300ddf68afe5f2bd9974bd9a0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -192,6 +192,8 @@ config VECTORS_BASE source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "System Type" choice @@ -538,16 +540,15 @@ config ARCH_OMAP help Support for TI's OMAP platform (OMAP1 and OMAP2). -config ARCH_MSM7X00A - bool "Qualcomm MSM7X00A" +config ARCH_MSM + bool "Qualcomm MSM" select GENERIC_TIME select GENERIC_CLOCKEVENTS help - Support for Qualcomm MSM7X00A based systems. This runs on the ARM11 - apps processor of the MSM7X00A and depends on a shared memory + Support for Qualcomm MSM7K based systems. This runs on the ARM11 + apps processor of the MSM7K and depends on a shared memory interface to the ARM9 modem processor which runs the baseband stack and controls some vital subsystems (clock and power control, etc). 
- endchoice diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7d5121260fda0370fe85303120544af07689cf79..bd6e28115ebb36284050b290ea1d8800fbec3763 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -141,7 +141,7 @@ endif machine-$(CONFIG_ARCH_MX3) := mx3 machine-$(CONFIG_ARCH_ORION5X) := orion5x plat-$(CONFIG_PLAT_ORION) := orion - machine-$(CONFIG_ARCH_MSM7X00A) := msm + machine-$(CONFIG_ARCH_MSM) := msm machine-$(CONFIG_ARCH_LOKI) := loki machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0 diff --git a/arch/arm/configs/msm_defconfig b/arch/arm/configs/msm_defconfig index ae4c5e62086ac454b7e4e140ec66f0acc74dcfbf..3b4ecf2a90ddd0328e08a44ce018243b1ac665f4 100644 --- a/arch/arm/configs/msm_defconfig +++ b/arch/arm/configs/msm_defconfig @@ -133,7 +133,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory" # CONFIG_ARCH_LH7A40X is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set -CONFIG_ARCH_MSM7X00A=y +CONFIG_ARCH_MSM=y # # Boot options diff --git a/arch/arm/mach-iop13xx/include/mach/time.h b/arch/arm/mach-iop13xx/include/mach/time.h index 49213d9d7cad5c6538f4999de573fe2d50e2b82d..d6d52527589dc7c6ad1f45053b8f2a08e7d3dfc6 100644 --- a/arch/arm/mach-iop13xx/include/mach/time.h +++ b/arch/arm/mach-iop13xx/include/mach/time.h @@ -41,7 +41,7 @@ static inline unsigned long iop13xx_core_freq(void) return 1200000000; default: printk("%s: warning unknown frequency, defaulting to 800Mhz\n", - __FUNCTION__); + __func__); } return 800000000; @@ -60,7 +60,7 @@ static inline unsigned long iop13xx_xsi_bus_ratio(void) return 4; default: printk("%s: warning unknown ratio, defaulting to 2\n", - __FUNCTION__); + __func__); } return 2; diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c index b0653a87159a89b36b67da7674b8163e80439f0e..30451300751beb360ca41cf6e6f46814db1215f9 100644 --- a/arch/arm/mach-ixp2000/ixdp2x00.c +++ b/arch/arm/mach-ixp2000/ixdp2x00.c @@ -143,7 +143,7 @@ static struct irq_chip ixdp2x00_cpld_irq_chip = { .unmask = ixdp2x00_irq_unmask }; -void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_irqs) +void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs) { unsigned int irq; @@ -154,7 +154,7 @@ void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigne board_irq_stat = stat_reg; board_irq_mask = mask_reg; - board_irq_count = nr_irqs; + board_irq_count = nr_of_irqs; *board_irq_mask = 0xffffffff; diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 3553babbbf0541653a899ee3b18317736a01ba79..d140abca690afdee1a0c78a0a288ec545312f3f2 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -1,18 +1,13 @@ -if ARCH_MSM7X00A +if ARCH_MSM -comment "MSM7X00A Board Type" - depends on ARCH_MSM7X00A +comment "MSM Board Type" + depends on ARCH_MSM config MACH_HALIBUT - depends on ARCH_MSM7X00A + depends on ARCH_MSM default y - bool "Halibut Board (QCT SURF7200A)" + bool "Halibut Board (QCT SURF7201A)" help - Support for the Qualcomm SURF7200A eval board. - -config MSM7X00A_IDLE - depends on ARCH_MSM7X00A - default y - bool "Idle Support for MSM7X00A" + Support for the Qualcomm SURF7201A eval board. 
endif diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index d12f2365585096d2d8ee3ffe04652a13d66318ca..1aa47001aa3b065b1b0d4f3a5387a0447bda5cb0 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -1,7 +1,8 @@ obj-y += io.o idle.o irq.o timer.o dma.o - -# Common code for board init -obj-y += common.o +obj-y += devices.o +obj-y += proc_comm.o +obj-y += vreg.o +obj-y += clock.o clock-7x01a.o obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index a24259133e070258de7aa83236159d681fc4e276..c2a96e3965a6ccfab9bb15e6e2fd0cf8ef0b3fe0 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -33,6 +33,8 @@ #include #include +#include "devices.h" + static struct resource smc91x_resources[] = { [0] = { .start = 0x9C004300, @@ -53,31 +55,12 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; -static void mddi0_panel_power(int on) -{ -} - -static struct msm_mddi_platform_data msm_mddi0_pdata = { - .panel_power = mddi0_panel_power, - .has_vsync_irq = 0, -}; - -static struct platform_device msm_mddi0_device = { - .name = "msm_mddi", - .id = 0, - .dev = { - .platform_data = &msm_mddi0_pdata - }, -}; - -static struct platform_device msm_serial0_device = { - .name = "msm_serial", - .id = 0, -}; - static struct platform_device *devices[] __initdata = { - &msm_serial0_device, - &msm_mddi0_device, + &msm_device_uart3, + &msm_device_smd, + &msm_device_nand, + &msm_device_hsusb, + &msm_device_i2c, &smc91x_device, }; @@ -91,20 +74,15 @@ static void __init halibut_init_irq(void) static void __init halibut_init(void) { platform_add_devices(devices, ARRAY_SIZE(devices)); - msm_add_devices(); } static void __init halibut_map_io(void) { msm_map_common_io(); + msm_clock_init(); } MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") - -/* UART for LL DEBUG */ - .phys_io = MSM_UART1_PHYS, - .io_pg_offst = ((MSM_UART1_BASE) >> 18) & 0xfffc, - .boot_params = 0x10000100, .map_io = halibut_map_io, .init_irq = halibut_init_irq, diff --git a/arch/arm/mach-msm/clock-7x01a.c b/arch/arm/mach-msm/clock-7x01a.c new file mode 100644 index 0000000000000000000000000000000000000000..62230a3428eebdb64f8d746fed4d4ce2c95c44e9 --- /dev/null +++ b/arch/arm/mach-msm/clock-7x01a.c @@ -0,0 +1,126 @@ +/* arch/arm/mach-msm/clock-7x01a.c + * + * Clock tables for MSM7X01A + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007 QUALCOMM Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include "clock.h" +#include "devices.h" + +/* clock IDs used by the modem processor */ + +#define ACPU_CLK 0 /* Applications processor clock */ +#define ADM_CLK 1 /* Applications data mover clock */ +#define ADSP_CLK 2 /* ADSP clock */ +#define EBI1_CLK 3 /* External bus interface 1 clock */ +#define EBI2_CLK 4 /* External bus interface 2 clock */ +#define ECODEC_CLK 5 /* External CODEC clock */ +#define EMDH_CLK 6 /* External MDDI host clock */ +#define GP_CLK 7 /* General purpose clock */ +#define GRP_CLK 8 /* Graphics clock */ +#define I2C_CLK 9 /* I2C clock */ +#define ICODEC_RX_CLK 10 /* Internal CODEX RX clock */ +#define ICODEC_TX_CLK 11 /* Internal CODEX TX clock */ +#define IMEM_CLK 12 /* Internal graphics memory clock */ +#define MDC_CLK 13 /* MDDI client clock */ +#define MDP_CLK 14 /* Mobile display processor clock */ +#define PBUS_CLK 15 /* Peripheral bus clock */ +#define PCM_CLK 16 /* PCM clock */ +#define PMDH_CLK 17 /* Primary MDDI host clock */ +#define SDAC_CLK 18 /* Stereo DAC clock */ +#define SDC1_CLK 19 /* Secure Digital Card clocks */ +#define SDC1_PCLK 20 +#define SDC2_CLK 21 +#define SDC2_PCLK 22 +#define SDC3_CLK 23 +#define SDC3_PCLK 24 +#define SDC4_CLK 25 +#define SDC4_PCLK 26 +#define TSIF_CLK 27 /* Transport Stream Interface clocks */ +#define TSIF_REF_CLK 28 +#define TV_DAC_CLK 29 /* TV clocks */ +#define TV_ENC_CLK 30 +#define UART1_CLK 31 /* UART clocks */ +#define UART2_CLK 32 +#define UART3_CLK 33 +#define UART1DM_CLK 34 +#define UART2DM_CLK 35 +#define USB_HS_CLK 36 /* High speed USB core clock */ +#define USB_HS_PCLK 37 /* High speed USB pbus clock */ +#define USB_OTG_CLK 38 /* Full speed USB clock */ +#define VDC_CLK 39 /* Video controller clock */ +#define VFE_CLK 40 /* Camera / Video Front End clock */ +#define VFE_MDC_CLK 41 /* VFE MDDI client clock */ + +#define NR_CLKS 42 + +#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \ + .name = clk_name, \ + .id = clk_id, \ + .flags = clk_flags, \ + .dev = clk_dev, \ + } + +#define OFF CLKFLAG_AUTO_OFF +#define MINMAX CLKFLAG_USE_MIN_MAX_TO_SET + +struct clk msm_clocks[] = { + CLOCK("adm_clk", ADM_CLK, NULL, 0), + CLOCK("adsp_clk", ADSP_CLK, NULL, 0), + CLOCK("ebi1_clk", EBI1_CLK, NULL, 0), + CLOCK("ebi2_clk", EBI2_CLK, NULL, 0), + CLOCK("ecodec_clk", ECODEC_CLK, NULL, 0), + CLOCK("emdh_clk", EMDH_CLK, NULL, OFF), + CLOCK("gp_clk", GP_CLK, NULL, 0), + CLOCK("grp_clk", GRP_CLK, NULL, OFF), + CLOCK("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0), + CLOCK("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0), + CLOCK("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0), + CLOCK("imem_clk", IMEM_CLK, NULL, OFF), + CLOCK("mdc_clk", MDC_CLK, NULL, 0), + CLOCK("mdp_clk", MDP_CLK, NULL, OFF), + CLOCK("pbus_clk", PBUS_CLK, NULL, 0), + CLOCK("pcm_clk", PCM_CLK, NULL, 0), + CLOCK("pmdh_clk", PMDH_CLK, NULL, OFF | MINMAX), + CLOCK("sdac_clk", SDAC_CLK, NULL, OFF), + CLOCK("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF), + CLOCK("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF), + CLOCK("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF), + CLOCK("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF), + CLOCK("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF), + CLOCK("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF), + CLOCK("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF), + CLOCK("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF), + CLOCK("tsif_clk", TSIF_CLK, NULL, 0), + CLOCK("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), + CLOCK("tv_dac_clk", TV_DAC_CLK, NULL, 0), + CLOCK("tv_enc_clk", TV_ENC_CLK, NULL, 0), + 
CLOCK("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF), + CLOCK("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0), + CLOCK("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF), + CLOCK("uart1dm_clk", UART1DM_CLK, NULL, OFF), + CLOCK("uart2dm_clk", UART2DM_CLK, NULL, 0), + CLOCK("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF), + CLOCK("usb_hs_pclk", USB_HS_PCLK, &msm_device_hsusb.dev, OFF), + CLOCK("usb_otg_clk", USB_OTG_CLK, NULL, 0), + CLOCK("vdc_clk", VDC_CLK, NULL, OFF | MINMAX), + CLOCK("vfe_clk", VFE_CLK, NULL, OFF), + CLOCK("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF), +}; + +unsigned msm_num_clocks = ARRAY_SIZE(msm_clocks); diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c new file mode 100644 index 0000000000000000000000000000000000000000..3b1ce36f1032bec54ed69972c2126e70a4b789c8 --- /dev/null +++ b/arch/arm/mach-msm/clock.c @@ -0,0 +1,218 @@ +/* arch/arm/mach-msm/clock.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007 QUALCOMM Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clock.h" +#include "proc_comm.h" + +static DEFINE_MUTEX(clocks_mutex); +static DEFINE_SPINLOCK(clocks_lock); +static LIST_HEAD(clocks); + +/* + * glue for the proc_comm interface + */ +static inline int pc_clk_enable(unsigned id) +{ + return msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL); +} + +static inline void pc_clk_disable(unsigned id) +{ + msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL); +} + +static inline int pc_clk_set_rate(unsigned id, unsigned rate) +{ + return msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate); +} + +static inline int pc_clk_set_min_rate(unsigned id, unsigned rate) +{ + return msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate); +} + +static inline int pc_clk_set_max_rate(unsigned id, unsigned rate) +{ + return msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate); +} + +static inline int pc_clk_set_flags(unsigned id, unsigned flags) +{ + return msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags); +} + +static inline unsigned pc_clk_get_rate(unsigned id) +{ + if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL)) + return 0; + else + return id; +} + +static inline unsigned pc_clk_is_enabled(unsigned id) +{ + if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL)) + return 0; + else + return id; +} + +static inline int pc_pll_request(unsigned id, unsigned on) +{ + on = !!on; + return msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on); +} + +/* + * Standard clock functions defined in include/linux/clk.h + */ +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *clk; + + mutex_lock(&clocks_mutex); + + list_for_each_entry(clk, &clocks, list) + if (!strcmp(id, clk->name) && clk->dev == dev) + goto found_it; + + list_for_each_entry(clk, &clocks, list) + if (!strcmp(id, clk->name) && clk->dev == NULL) + goto found_it; + + clk = ERR_PTR(-ENOENT); +found_it: + mutex_unlock(&clocks_mutex); + return clk; +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_put); + +int 
clk_enable(struct clk *clk) +{ + unsigned long flags; + spin_lock_irqsave(&clocks_lock, flags); + clk->count++; + if (clk->count == 1) + pc_clk_enable(clk->id); + spin_unlock_irqrestore(&clocks_lock, flags); + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ + unsigned long flags; + spin_lock_irqsave(&clocks_lock, flags); + BUG_ON(clk->count == 0); + clk->count--; + if (clk->count == 0) + pc_clk_disable(clk->id); + spin_unlock_irqrestore(&clocks_lock, flags); +} +EXPORT_SYMBOL(clk_disable); + +unsigned long clk_get_rate(struct clk *clk) +{ + return pc_clk_get_rate(clk->id); +} +EXPORT_SYMBOL(clk_get_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret; + if (clk->flags & CLKFLAG_USE_MIN_MAX_TO_SET) { + ret = pc_clk_set_max_rate(clk->id, rate); + if (ret) + return ret; + return pc_clk_set_min_rate(clk->id, rate); + } + return pc_clk_set_rate(clk->id, rate); +} +EXPORT_SYMBOL(clk_set_rate); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return -ENOSYS; +} +EXPORT_SYMBOL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + return ERR_PTR(-ENOSYS); +} +EXPORT_SYMBOL(clk_get_parent); + +int clk_set_flags(struct clk *clk, unsigned long flags) +{ + if (clk == NULL || IS_ERR(clk)) + return -EINVAL; + return pc_clk_set_flags(clk->id, flags); +} +EXPORT_SYMBOL(clk_set_flags); + + +void __init msm_clock_init(void) +{ + unsigned n; + + spin_lock_init(&clocks_lock); + mutex_lock(&clocks_mutex); + for (n = 0; n < msm_num_clocks; n++) + list_add_tail(&msm_clocks[n].list, &clocks); + mutex_unlock(&clocks_mutex); +} + +/* The bootloader and/or AMSS may have left various clocks enabled. + * Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have + * not been explicitly enabled by a clk_enable() call. + */ +static int __init clock_late_init(void) +{ + unsigned long flags; + struct clk *clk; + unsigned count = 0; + + mutex_lock(&clocks_mutex); + list_for_each_entry(clk, &clocks, list) { + if (clk->flags & CLKFLAG_AUTO_OFF) { + spin_lock_irqsave(&clocks_lock, flags); + if (!clk->count) { + count++; + pc_clk_disable(clk->id); + } + spin_unlock_irqrestore(&clocks_lock, flags); + } + } + mutex_unlock(&clocks_mutex); + pr_info("clock_late_init() disabled %d unused clocks\n", count); + return 0; +} + +late_initcall(clock_late_init); diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h new file mode 100644 index 0000000000000000000000000000000000000000..f875e1544e5feacfbacf165ade0d29b48667c7ea --- /dev/null +++ b/arch/arm/mach-msm/clock.h @@ -0,0 +1,48 @@ +/* arch/arm/mach-msm/clock.h + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007 QUALCOMM Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_CLOCK_H +#define __ARCH_ARM_MACH_MSM_CLOCK_H + +#include + +#define CLKFLAG_INVERT 0x00000001 +#define CLKFLAG_NOINVERT 0x00000002 +#define CLKFLAG_NONEST 0x00000004 +#define CLKFLAG_NORESET 0x00000008 + +#define CLK_FIRST_AVAILABLE_FLAG 0x00000100 +#define CLKFLAG_USE_MIN_MAX_TO_SET 0x00000200 +#define CLKFLAG_AUTO_OFF 0x00000400 + +struct clk { + uint32_t id; + uint32_t count; + uint32_t flags; + const char *name; + struct list_head list; + struct device *dev; +}; + +#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100) +#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104) +#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124) + +extern struct clk msm_clocks[]; +extern unsigned msm_num_clocks; + +#endif + diff --git a/arch/arm/mach-msm/common.c b/arch/arm/mach-msm/common.c deleted file mode 100644 index 604f8ade9587948244a7390da3a8263c0c2f5859..0000000000000000000000000000000000000000 --- a/arch/arm/mach-msm/common.c +++ /dev/null @@ -1,116 +0,0 @@ -/* linux/arch/arm/mach-msm/common.c - * - * Common setup code for MSM7K Boards - * - * Copyright (C) 2007 Google, Inc. - * Author: Brian Swetland - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include - -#include - -#include - -#include -#include - -#include - -#include - -struct flash_platform_data msm_nand_data = { - .parts = 0, - .nr_parts = 0, -}; - -static struct resource msm_nand_resources[] = { - [0] = { - .start = 7, - .end = 7, - .flags = IORESOURCE_DMA, - }, -}; - -static struct platform_device msm_nand_device = { - .name = "msm_nand", - .id = -1, - .num_resources = ARRAY_SIZE(msm_nand_resources), - .resource = msm_nand_resources, - .dev = { - .platform_data = &msm_nand_data, - }, -}; - -static struct platform_device msm_smd_device = { - .name = "msm_smd", - .id = -1, -}; - -static struct resource msm_i2c_resources[] = { - { - .start = MSM_I2C_BASE, - .end = MSM_I2C_BASE + MSM_I2C_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = INT_PWB_I2C, - .end = INT_PWB_I2C, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device msm_i2c_device = { - .name = "msm_i2c", - .id = 0, - .num_resources = ARRAY_SIZE(msm_i2c_resources), - .resource = msm_i2c_resources, -}; - -static struct resource usb_resources[] = { - { - .start = MSM_HSUSB_PHYS, - .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE, - .flags = IORESOURCE_MEM, - }, - { - .start = INT_USB_HS, - .end = INT_USB_HS, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device msm_hsusb_device = { - .name = "msm_hsusb", - .id = -1, - .num_resources = ARRAY_SIZE(usb_resources), - .resource = usb_resources, - .dev = { - .coherent_dma_mask = 0xffffffff, - }, -}; - -static struct platform_device *devices[] __initdata = { - &msm_nand_device, - &msm_smd_device, - &msm_i2c_device, - &msm_hsusb_device, -}; - -void __init msm_add_devices(void) -{ - platform_add_devices(devices, ARRAY_SIZE(devices)); -} diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices.c new file mode 100644 index 0000000000000000000000000000000000000000..f2a74b92a97fa59085a7fa7266711b99728a77b8 --- /dev/null 
+++ b/arch/arm/mach-msm/devices.c @@ -0,0 +1,267 @@ +/* linux/arch/arm/mach-msm/devices.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + +#include +#include "devices.h" + +#include +#include +#include + +static struct resource resources_uart1[] = { + { + .start = INT_UART1, + .end = INT_UART1, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART1_PHYS, + .end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource resources_uart2[] = { + { + .start = INT_UART2, + .end = INT_UART2, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART2_PHYS, + .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource resources_uart3[] = { + { + .start = INT_UART3, + .end = INT_UART3, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART3_PHYS, + .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device msm_device_uart1 = { + .name = "msm_serial", + .id = 0, + .num_resources = ARRAY_SIZE(resources_uart1), + .resource = resources_uart1, +}; + +struct platform_device msm_device_uart2 = { + .name = "msm_serial", + .id = 1, + .num_resources = ARRAY_SIZE(resources_uart2), + .resource = resources_uart2, +}; + +struct platform_device msm_device_uart3 = { + .name = "msm_serial", + .id = 2, + .num_resources = ARRAY_SIZE(resources_uart3), + .resource = resources_uart3, +}; + +static struct resource resources_i2c[] = { + { + .start = MSM_I2C_PHYS, + .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_PWB_I2C, + .end = INT_PWB_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_i2c = { + .name = "msm_i2c", + .id = 0, + .num_resources = ARRAY_SIZE(resources_i2c), + .resource = resources_i2c, +}; + +static struct resource resources_hsusb[] = { + { + .start = MSM_HSUSB_PHYS, + .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_USB_HS, + .end = INT_USB_HS, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_hsusb = { + .name = "msm_hsusb", + .id = -1, + .num_resources = ARRAY_SIZE(resources_hsusb), + .resource = resources_hsusb, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct flash_platform_data msm_nand_data = { + .parts = NULL, + .nr_parts = 0, +}; + +static struct resource resources_nand[] = { + [0] = { + .start = 7, + .end = 7, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_nand = { + .name = "msm_nand", + .id = -1, + .num_resources = ARRAY_SIZE(resources_nand), + .resource = resources_nand, + .dev = { + .platform_data = &msm_nand_data, + }, +}; + +struct platform_device msm_device_smd = { + .name = "msm_smd", + .id = -1, +}; + +static struct resource resources_sdc1[] = { + { + .start = MSM_SDC1_PHYS, + .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC1_0, + .end = INT_SDC1_1, + .flags = IORESOURCE_IRQ, + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + 
+static struct resource resources_sdc2[] = { + { + .start = MSM_SDC2_PHYS, + .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC2_0, + .end = INT_SDC2_1, + .flags = IORESOURCE_IRQ, + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc3[] = { + { + .start = MSM_SDC3_PHYS, + .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC3_0, + .end = INT_SDC3_1, + .flags = IORESOURCE_IRQ, + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc4[] = { + { + .start = MSM_SDC4_PHYS, + .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC4_0, + .end = INT_SDC4_1, + .flags = IORESOURCE_IRQ, + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_sdc1 = { + .name = "msm_sdcc", + .id = 1, + .num_resources = ARRAY_SIZE(resources_sdc1), + .resource = resources_sdc1, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc2 = { + .name = "msm_sdcc", + .id = 2, + .num_resources = ARRAY_SIZE(resources_sdc2), + .resource = resources_sdc2, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc3 = { + .name = "msm_sdcc", + .id = 3, + .num_resources = ARRAY_SIZE(resources_sdc3), + .resource = resources_sdc3, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc4 = { + .name = "msm_sdcc", + .id = 4, + .num_resources = ARRAY_SIZE(resources_sdc4), + .resource = resources_sdc4, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h new file mode 100644 index 0000000000000000000000000000000000000000..0744c4a27d6ab84b60a0d6acafc8b13b6ef2d5a2 --- /dev/null +++ b/arch/arm/mach-msm/devices.h @@ -0,0 +1,36 @@ +/* linux/arch/arm/mach-msm/devices.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_DEVICES_H +#define __ARCH_ARM_MACH_MSM_DEVICES_H + +extern struct platform_device msm_device_uart1; +extern struct platform_device msm_device_uart2; +extern struct platform_device msm_device_uart3; + +extern struct platform_device msm_device_sdc1; +extern struct platform_device msm_device_sdc2; +extern struct platform_device msm_device_sdc3; +extern struct platform_device msm_device_sdc4; + +extern struct platform_device msm_device_hsusb; + +extern struct platform_device msm_device_i2c; + +extern struct platform_device msm_device_smd; + +extern struct platform_device msm_device_nand; + +#endif diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c index 0c8f252637e15a31486887c15dd28818829691eb..f5420f9585c59ec504dd85325c55e42141e50f96 100644 --- a/arch/arm/mach-msm/dma.c +++ b/arch/arm/mach-msm/dma.c @@ -26,7 +26,7 @@ enum { }; static DEFINE_SPINLOCK(msm_dmov_lock); -static struct msm_dmov_cmd active_command; +static unsigned int channel_active; static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT]; static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT]; unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS; @@ -43,6 +43,11 @@ unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS; #define PRINT_FLOW(format, args...) \ MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args); +void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful) +{ + writel((graceful << 31), DMOV_FLUSH0(id)); +} + void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) { unsigned long irq_flags; @@ -60,6 +65,9 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) #endif PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status); list_add_tail(&cmd->list, &active_commands[id]); + if (!channel_active) + enable_irq(INT_ADM_AARM); + channel_active |= 1U << id; writel(cmd->cmdptr, DMOV_CMD_PTR(id)); } else { if (list_empty(&active_commands[id])) @@ -76,21 +84,19 @@ struct msm_dmov_exec_cmdptr_cmd { struct completion complete; unsigned id; unsigned int result; - unsigned int flush[6]; + struct msm_dmov_errdata err; }; -static void dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd, unsigned int result) +static void +dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd, + unsigned int result, + struct msm_dmov_errdata *err) { struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd); cmd->result = result; - if (result != 0x80000002) { - cmd->flush[0] = readl(DMOV_FLUSH0(cmd->id)); - cmd->flush[1] = readl(DMOV_FLUSH1(cmd->id)); - cmd->flush[2] = readl(DMOV_FLUSH2(cmd->id)); - cmd->flush[3] = readl(DMOV_FLUSH3(cmd->id)); - cmd->flush[4] = readl(DMOV_FLUSH4(cmd->id)); - cmd->flush[5] = readl(DMOV_FLUSH5(cmd->id)); - } + if (result != 0x80000002 && err) + memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata)); + complete(&cmd->complete); } @@ -111,7 +117,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr) if (cmd.result != 0x80000002) { PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result); PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n", - id, cmd.flush[0], cmd.flush[1], cmd.flush[2], cmd.flush[3]); + id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]); return -EIO; } PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr); @@ -159,25 +165,40 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id) "for %p, result %x\n", id, cmd, ch_result); if (cmd) { list_del(&cmd->list); - 
cmd->complete_func(cmd, ch_result); + cmd->complete_func(cmd, ch_result, NULL); } } if (ch_result & DMOV_RSLT_FLUSH) { - unsigned int flush0 = readl(DMOV_FLUSH0(id)); + struct msm_dmov_errdata errdata; + + errdata.flush[0] = readl(DMOV_FLUSH0(id)); + errdata.flush[1] = readl(DMOV_FLUSH1(id)); + errdata.flush[2] = readl(DMOV_FLUSH2(id)); + errdata.flush[3] = readl(DMOV_FLUSH3(id)); + errdata.flush[4] = readl(DMOV_FLUSH4(id)); + errdata.flush[5] = readl(DMOV_FLUSH5(id)); PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status); - PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, flush0); + PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]); if (cmd) { list_del(&cmd->list); - cmd->complete_func(cmd, ch_result); + cmd->complete_func(cmd, ch_result, &errdata); } } if (ch_result & DMOV_RSLT_ERROR) { - unsigned int flush0 = readl(DMOV_FLUSH0(id)); + struct msm_dmov_errdata errdata; + + errdata.flush[0] = readl(DMOV_FLUSH0(id)); + errdata.flush[1] = readl(DMOV_FLUSH1(id)); + errdata.flush[2] = readl(DMOV_FLUSH2(id)); + errdata.flush[3] = readl(DMOV_FLUSH3(id)); + errdata.flush[4] = readl(DMOV_FLUSH4(id)); + errdata.flush[5] = readl(DMOV_FLUSH5(id)); + PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status); - PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, flush0); + PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]); if (cmd) { list_del(&cmd->list); - cmd->complete_func(cmd, ch_result); + cmd->complete_func(cmd, ch_result, &errdata); } /* this does not seem to work, once we get an error */ /* the datamover will no longer accept commands */ @@ -193,8 +214,14 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id) writel(cmd->cmdptr, DMOV_CMD_PTR(id)); } } while (ch_status & DMOV_STATUS_RSLT_VALID); + if (list_empty(&active_commands[id]) && list_empty(&ready_commands[id])) + channel_active &= ~(1U << id); PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status); } + + if (!channel_active) + disable_irq(INT_ADM_AARM); + spin_unlock_irqrestore(&msm_dmov_lock, irq_flags); return IRQ_HANDLED; } @@ -202,12 +229,17 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id) static int __init msm_init_datamover(void) { int i; + int ret; for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) { INIT_LIST_HEAD(&ready_commands[i]); INIT_LIST_HEAD(&active_commands[i]); writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i)); } - return request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL); + ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL); + if (ret) + return ret; + disable_irq(INT_ADM_AARM); + return 0; } arch_initcall(msm_init_datamover); diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h index a7639493c095391ff24da034be9f5b31bfc93855..264d62e519f3dedcd6f8024ccfb7c8f5d851737e 100644 --- a/arch/arm/mach-msm/include/mach/board.h +++ b/arch/arm/mach-msm/include/mach/board.h @@ -33,5 +33,6 @@ void __init msm_add_devices(void); void __init msm_map_common_io(void); void __init msm_init_irq(void); void __init msm_init_gpio(void); +void __init msm_clock_init(void); #endif diff --git a/arch/arm/mach-msm/include/mach/debug-macro.S b/arch/arm/mach-msm/include/mach/debug-macro.S index 
528eef4b605cdf1ee7c4b75899e6efc441f06744..1db3c97dbc49e4738d386396a5db3e6169cb7785 100644 --- a/arch/arm/mach-msm/include/mach/debug-macro.S +++ b/arch/arm/mach-msm/include/mach/debug-macro.S @@ -22,18 +22,22 @@ mrc p15, 0, \rx, c1, c0 tst \rx, #1 ldreq \rx, =MSM_UART1_PHYS - ldrne \rx, =MSM_UART1_BASE + movne \rx, #0 .endm .macro senduart,rd,rx - str \rd, [\rx, #0x0C] + teq \rx, #0 + strne \rd, [\rx, #0x0C] .endm .macro waituart,rd,rx @ wait for TX_READY + teq \rx, #0 + bne 2f 1: ldr \rd, [\rx, #0x08] tst \rd, #0x04 beq 1b +2: .endm .macro busyuart,rd,rx diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h index ad1c87f86d10598080627c4553b3f265273c07f4..5ab5bdffab07334781b960661deebe041bab408b 100644 --- a/arch/arm/mach-msm/include/mach/dma.h +++ b/arch/arm/mach-msm/include/mach/dma.h @@ -1,4 +1,4 @@ -/* arch/arm/mach-msm/include/mach/dma.h +/* linux/include/asm-arm/arch-msm/dma.h * * Copyright (C) 2007 Google, Inc. * @@ -18,17 +18,21 @@ #include #include +struct msm_dmov_errdata { + uint32_t flush[6]; +}; + struct msm_dmov_cmd { struct list_head list; unsigned int cmdptr; - void (*complete_func)(struct msm_dmov_cmd *cmd, unsigned int result); -/* void (*user_result_func)(struct msm_dmov_cmd *cmd); */ + void (*complete_func)(struct msm_dmov_cmd *cmd, + unsigned int result, + struct msm_dmov_errdata *err); }; void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd); -void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd); +void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful); int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr); -/* int msm_dmov_exec_cmd_etc(unsigned id, unsigned int cmdptr, int timeout, int interruptible); */ @@ -122,6 +126,16 @@ typedef struct { unsigned _reserved; } dmov_sg; +/* Box mode */ +typedef struct { + uint32_t cmd; + uint32_t src_row_addr; + uint32_t dst_row_addr; + uint32_t src_dst_len; + uint32_t num_rows; + uint32_t row_offset; +} dmov_box; + /* bits for the cmd field of the above structures */ #define CMD_LC (1 << 31) /* last command */ diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h index e221f58ceea3997a0a90f0b52d997f89a0874acf..2f7b4c8620d95f257e6c845f541ce4a42a4abea7 100644 --- a/arch/arm/mach-msm/include/mach/msm_iomap.h +++ b/arch/arm/mach-msm/include/mach/msm_iomap.h @@ -37,11 +37,17 @@ * */ -#define MSM_VIC_BASE 0xE0000000 +#ifdef __ASSEMBLY__ +#define IOMEM(x) x +#else +#define IOMEM(x) ((void __force __iomem *)(x)) +#endif + +#define MSM_VIC_BASE IOMEM(0xE0000000) #define MSM_VIC_PHYS 0xC0000000 #define MSM_VIC_SIZE SZ_4K -#define MSM_CSR_BASE 0xE0001000 +#define MSM_CSR_BASE IOMEM(0xE0001000) #define MSM_CSR_PHYS 0xC0100000 #define MSM_CSR_SIZE SZ_4K @@ -49,56 +55,67 @@ #define MSM_GPT_BASE MSM_CSR_BASE #define MSM_GPT_SIZE SZ_4K -#define MSM_DMOV_BASE 0xE0002000 +#define MSM_DMOV_BASE IOMEM(0xE0002000) #define MSM_DMOV_PHYS 0xA9700000 #define MSM_DMOV_SIZE SZ_4K -#define MSM_UART1_BASE 0xE0003000 +#define MSM_GPIO1_BASE IOMEM(0xE0003000) +#define MSM_GPIO1_PHYS 0xA9200000 +#define MSM_GPIO1_SIZE SZ_4K + +#define MSM_GPIO2_BASE IOMEM(0xE0004000) +#define MSM_GPIO2_PHYS 0xA9300000 +#define MSM_GPIO2_SIZE SZ_4K + +#define MSM_CLK_CTL_BASE IOMEM(0xE0005000) +#define MSM_CLK_CTL_PHYS 0xA8600000 +#define MSM_CLK_CTL_SIZE SZ_4K + +#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000) +#define MSM_SHARED_RAM_PHYS 0x01F00000 +#define MSM_SHARED_RAM_SIZE SZ_1M + #define MSM_UART1_PHYS 0xA9A00000 #define MSM_UART1_SIZE SZ_4K -#define 
MSM_UART2_BASE 0xE0004000 #define MSM_UART2_PHYS 0xA9B00000 #define MSM_UART2_SIZE SZ_4K -#define MSM_UART3_BASE 0xE0005000 #define MSM_UART3_PHYS 0xA9C00000 #define MSM_UART3_SIZE SZ_4K -#define MSM_I2C_BASE 0xE0006000 -#define MSM_I2C_PHYS 0xA9900000 -#define MSM_I2C_SIZE SZ_4K +#define MSM_SDC1_PHYS 0xA0400000 +#define MSM_SDC1_SIZE SZ_4K -#define MSM_GPIO1_BASE 0xE0007000 -#define MSM_GPIO1_PHYS 0xA9200000 -#define MSM_GPIO1_SIZE SZ_4K +#define MSM_SDC2_PHYS 0xA0500000 +#define MSM_SDC2_SIZE SZ_4K -#define MSM_GPIO2_BASE 0xE0008000 -#define MSM_GPIO2_PHYS 0xA9300000 -#define MSM_GPIO2_SIZE SZ_4K +#define MSM_SDC3_PHYS 0xA0600000 +#define MSM_SDC3_SIZE SZ_4K + +#define MSM_SDC4_PHYS 0xA0700000 +#define MSM_SDC4_SIZE SZ_4K + +#define MSM_I2C_PHYS 0xA9900000 +#define MSM_I2C_SIZE SZ_4K -#define MSM_HSUSB_BASE 0xE0009000 #define MSM_HSUSB_PHYS 0xA0800000 #define MSM_HSUSB_SIZE SZ_4K -#define MSM_CLK_CTL_BASE 0xE000A000 -#define MSM_CLK_CTL_PHYS 0xA8600000 -#define MSM_CLK_CTL_SIZE SZ_4K - -#define MSM_PMDH_BASE 0xE000B000 #define MSM_PMDH_PHYS 0xAA600000 #define MSM_PMDH_SIZE SZ_4K -#define MSM_EMDH_BASE 0xE000C000 #define MSM_EMDH_PHYS 0xAA700000 #define MSM_EMDH_SIZE SZ_4K -#define MSM_MDP_BASE 0xE0010000 #define MSM_MDP_PHYS 0xAA200000 #define MSM_MDP_SIZE 0x000F0000 -#define MSM_SHARED_RAM_BASE 0xE0100000 -#define MSM_SHARED_RAM_PHYS 0x01F00000 -#define MSM_SHARED_RAM_SIZE SZ_1M +#define MSM_MDC_PHYS 0xAA500000 +#define MSM_MDC_SIZE SZ_1M + +#define MSM_AD5_PHYS 0xAC000000 +#define MSM_AD5_SIZE (SZ_1M*13) + #endif diff --git a/arch/arm/mach-msm/include/mach/vreg.h b/arch/arm/mach-msm/include/mach/vreg.h new file mode 100644 index 0000000000000000000000000000000000000000..9f9e25cb718e3f00e7b24e4d785656b9413b486d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/vreg.h @@ -0,0 +1,29 @@ +/* linux/include/asm-arm/arch-msm/vreg.h + * + * Copyright (C) 2008 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_VREG_H +#define __ARCH_ARM_MACH_MSM_VREG_H + +struct vreg; + +struct vreg *vreg_get(struct device *dev, const char *id); +void vreg_put(struct vreg *vreg); + +int vreg_enable(struct vreg *vreg); +void vreg_disable(struct vreg *vreg); +int vreg_set_level(struct vreg *vreg, unsigned mv); + +#endif diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 7999e4ba8e200c4325f4df99d296262453aa1eaf..6e7692ff6f2cb1a7416a8b12c44d0aed0748f3ae 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c @@ -28,7 +28,7 @@ #include #define MSM_DEVICE(name) { \ - .virtual = MSM_##name##_BASE, \ + .virtual = (unsigned long) MSM_##name##_BASE, \ .pfn = __phys_to_pfn(MSM_##name##_PHYS), \ .length = MSM_##name##_SIZE, \ .type = MT_DEVICE_NONSHARED, \ @@ -39,19 +39,11 @@ static struct map_desc msm_io_desc[] __initdata = { MSM_DEVICE(CSR), MSM_DEVICE(GPT), MSM_DEVICE(DMOV), - MSM_DEVICE(UART1), - MSM_DEVICE(UART2), - MSM_DEVICE(UART3), - MSM_DEVICE(I2C), MSM_DEVICE(GPIO1), MSM_DEVICE(GPIO2), - MSM_DEVICE(HSUSB), MSM_DEVICE(CLK_CTL), - MSM_DEVICE(PMDH), - MSM_DEVICE(EMDH), - MSM_DEVICE(MDP), { - .virtual = MSM_SHARED_RAM_BASE, + .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS), .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c index 04b8d182ff8aa72a33af684d79bc1f9ba7094f86..69ca0dd79bdfc65ed8bdbeff8d1535969f1bd5a8 100644 --- a/arch/arm/mach-msm/irq.c +++ b/arch/arm/mach-msm/irq.c @@ -66,20 +66,20 @@ static void msm_irq_ack(unsigned int irq) { - unsigned reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0); + void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0); irq = 1 << (irq & 31); writel(irq, reg); } static void msm_irq_mask(unsigned int irq) { - unsigned reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0); + void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0); writel(1 << (irq & 31), reg); } static void msm_irq_unmask(unsigned int irq) { - unsigned reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0); + void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0); writel(1 << (irq & 31), reg); } @@ -90,8 +90,8 @@ static int msm_irq_set_wake(unsigned int irq, unsigned int on) static int msm_irq_set_type(unsigned int irq, unsigned int flow_type) { - unsigned treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0); - unsigned preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0); + void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0); + void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0); int b = 1 << (irq & 31); if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c new file mode 100644 index 0000000000000000000000000000000000000000..915ee704ed3c2d6bc305421c5d02badc0f18bfeb --- /dev/null +++ b/arch/arm/mach-msm/proc_comm.c @@ -0,0 +1,110 @@ +/* arch/arm/mach-msm/proc_comm.c + * + * Copyright (C) 2007-2008 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include "proc_comm.h" + +#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4) + +static inline void notify_other_proc_comm(void) +{ + writel(1, MSM_A2M_INT(6)); +} + +#define APP_COMMAND 0x00 +#define APP_STATUS 0x04 +#define APP_DATA1 0x08 +#define APP_DATA2 0x0C + +#define MDM_COMMAND 0x10 +#define MDM_STATUS 0x14 +#define MDM_DATA1 0x18 +#define MDM_DATA2 0x1C + +static DEFINE_SPINLOCK(proc_comm_lock); + +/* The higher level SMD support will install this to + * provide a way to check for and handle modem restart. + */ +int (*msm_check_for_modem_crash)(void); + +/* Poll for a state change, checking for possible + * modem crashes along the way (so we don't wait + * forever while the ARM9 is blowing up). + * + * Return an error in the event of a modem crash and + * restart so the msm_proc_comm() routine can restart + * the operation from the beginning. + */ +static int proc_comm_wait_for(void __iomem *addr, unsigned value) +{ + for (;;) { + if (readl(addr) == value) + return 0; + + if (msm_check_for_modem_crash) + if (msm_check_for_modem_crash()) + return -EAGAIN; + } +} + +int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2) +{ + void __iomem *base = MSM_SHARED_RAM_BASE; + unsigned long flags; + int ret; + + spin_lock_irqsave(&proc_comm_lock, flags); + + for (;;) { + if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) + continue; + + writel(cmd, base + APP_COMMAND); + writel(data1 ? *data1 : 0, base + APP_DATA1); + writel(data2 ? *data2 : 0, base + APP_DATA2); + + notify_other_proc_comm(); + + if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE)) + continue; + + if (readl(base + APP_STATUS) != PCOM_CMD_FAIL) { + if (data1) + *data1 = readl(base + APP_DATA1); + if (data2) + *data2 = readl(base + APP_DATA2); + ret = 0; + } else { + ret = -EIO; + } + break; + } + + writel(PCOM_CMD_IDLE, base + APP_COMMAND); + + spin_unlock_irqrestore(&proc_comm_lock, flags); + + return ret; +} + + diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h new file mode 100644 index 0000000000000000000000000000000000000000..834760f25692c6f9d066cd05d41ef4a0412c4944 --- /dev/null +++ b/arch/arm/mach-msm/proc_comm.h @@ -0,0 +1,165 @@ +/* arch/arm/mach-msm/proc_comm.h + * + * Copyright (c) 2007 QUALCOMM Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_MSM_PROC_COMM_H_ +#define _ARCH_ARM_MACH_MSM_PROC_COMM_H_ + +enum { + PCOM_CMD_IDLE = 0x0, + PCOM_CMD_DONE, + PCOM_RESET_APPS, + PCOM_RESET_CHIP, + PCOM_CONFIG_NAND_MPU, + PCOM_CONFIG_USB_CLKS, + PCOM_GET_POWER_ON_STATUS, + PCOM_GET_WAKE_UP_STATUS, + PCOM_GET_BATT_LEVEL, + PCOM_CHG_IS_CHARGING, + PCOM_POWER_DOWN, + PCOM_USB_PIN_CONFIG, + PCOM_USB_PIN_SEL, + PCOM_SET_RTC_ALARM, + PCOM_NV_READ, + PCOM_NV_WRITE, + PCOM_GET_UUID_HIGH, + PCOM_GET_UUID_LOW, + PCOM_GET_HW_ENTROPY, + PCOM_RPC_GPIO_TLMM_CONFIG_REMOTE, + PCOM_CLKCTL_RPC_ENABLE, + PCOM_CLKCTL_RPC_DISABLE, + PCOM_CLKCTL_RPC_RESET, + PCOM_CLKCTL_RPC_SET_FLAGS, + PCOM_CLKCTL_RPC_SET_RATE, + PCOM_CLKCTL_RPC_MIN_RATE, + PCOM_CLKCTL_RPC_MAX_RATE, + PCOM_CLKCTL_RPC_RATE, + PCOM_CLKCTL_RPC_PLL_REQUEST, + PCOM_CLKCTL_RPC_ENABLED, + PCOM_VREG_SWITCH, + PCOM_VREG_SET_LEVEL, + PCOM_GPIO_TLMM_CONFIG_GROUP, + PCOM_GPIO_TLMM_UNCONFIG_GROUP, + PCOM_NV_WRITE_BYTES_4_7, + PCOM_CONFIG_DISP, + PCOM_GET_FTM_BOOT_COUNT, + PCOM_RPC_GPIO_TLMM_CONFIG_EX, + PCOM_PM_MPP_CONFIG, + PCOM_GPIO_IN, + PCOM_GPIO_OUT, + PCOM_RESET_MODEM, + PCOM_RESET_CHIP_IMM, + PCOM_PM_VID_EN, + PCOM_VREG_PULLDOWN, + PCOM_NUM_CMDS, +}; + +enum { + PCOM_INVALID_STATUS = 0x0, + PCOM_READY, + PCOM_CMD_RUNNING, + PCOM_CMD_SUCCESS, + PCOM_CMD_FAIL, +}; + +/* List of VREGs that support the Pull Down Resistor setting. */ +enum { + PM_VREG_PDOWN_MSMA_ID, + PM_VREG_PDOWN_MSMP_ID, + PM_VREG_PDOWN_MSME1_ID, /* Not supported in Panoramix */ + PM_VREG_PDOWN_MSMC1_ID, /* Not supported in PM6620 */ + PM_VREG_PDOWN_MSMC2_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_GP3_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_MSME2_ID, /* Supported in PM7500 and Panoramix only */ + PM_VREG_PDOWN_GP4_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_GP1_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_TCXO_ID, + PM_VREG_PDOWN_PA_ID, + PM_VREG_PDOWN_RFTX_ID, + PM_VREG_PDOWN_RFRX1_ID, + PM_VREG_PDOWN_RFRX2_ID, + PM_VREG_PDOWN_SYNT_ID, + PM_VREG_PDOWN_WLAN_ID, + PM_VREG_PDOWN_USB_ID, + PM_VREG_PDOWN_MMC_ID, + PM_VREG_PDOWN_RUIM_ID, + PM_VREG_PDOWN_MSMC0_ID, /* Supported in PM6610 only */ + PM_VREG_PDOWN_GP2_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_GP5_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_GP6_ID, /* Supported in PM7500 only */ + PM_VREG_PDOWN_RF_ID, + PM_VREG_PDOWN_RF_VCO_ID, + PM_VREG_PDOWN_MPLL_ID, + PM_VREG_PDOWN_S2_ID, + PM_VREG_PDOWN_S3_ID, + PM_VREG_PDOWN_RFUBM_ID, + + /* new for HAN */ + PM_VREG_PDOWN_RF1_ID, + PM_VREG_PDOWN_RF2_ID, + PM_VREG_PDOWN_RFA_ID, + PM_VREG_PDOWN_CDC2_ID, + PM_VREG_PDOWN_RFTX2_ID, + PM_VREG_PDOWN_USIM_ID, + PM_VREG_PDOWN_USB2P6_ID, + PM_VREG_PDOWN_USB3P3_ID, + PM_VREG_PDOWN_INVALID_ID, + + /* backward compatible enums only */ + PM_VREG_PDOWN_CAM_ID = PM_VREG_PDOWN_GP1_ID, + PM_VREG_PDOWN_MDDI_ID = PM_VREG_PDOWN_GP2_ID, + PM_VREG_PDOWN_RUIM2_ID = PM_VREG_PDOWN_GP3_ID, + PM_VREG_PDOWN_AUX_ID = PM_VREG_PDOWN_GP4_ID, + PM_VREG_PDOWN_AUX2_ID = PM_VREG_PDOWN_GP5_ID, + PM_VREG_PDOWN_BT_ID = PM_VREG_PDOWN_GP6_ID, + + PM_VREG_PDOWN_MSME_ID = PM_VREG_PDOWN_MSME1_ID, + PM_VREG_PDOWN_MSMC_ID = PM_VREG_PDOWN_MSMC1_ID, + PM_VREG_PDOWN_RFA1_ID = PM_VREG_PDOWN_RFRX2_ID, + PM_VREG_PDOWN_RFA2_ID = PM_VREG_PDOWN_RFTX2_ID, + PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID +}; + +/* gpio info for PCOM_RPC_GPIO_TLMM_CONFIG_EX */ + +#define GPIO_ENABLE 0 +#define GPIO_DISABLE 1 + +#define GPIO_INPUT 0 +#define GPIO_OUTPUT 1 + +#define GPIO_NO_PULL 0 +#define GPIO_PULL_DOWN 1 +#define GPIO_KEEPER 2 +#define GPIO_PULL_UP 3 + +#define 
GPIO_2MA 0 +#define GPIO_4MA 1 +#define GPIO_6MA 2 +#define GPIO_8MA 3 +#define GPIO_10MA 4 +#define GPIO_12MA 5 +#define GPIO_14MA 6 +#define GPIO_16MA 7 + +#define PCOM_GPIO_CFG(gpio, func, dir, pull, drvstr) \ + ((((gpio) & 0x3FF) << 4) | \ + ((func) & 0xf) | \ + (((dir) & 0x1) << 14) | \ + (((pull) & 0x3) << 15) | \ + (((drvstr) & 0xF) << 17)) + +int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2); + +#endif diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2bffe9b7e9fe4e726b1b2814675390a03de4629c..345a14cb73c38debc9e6bf55caac2b56700dea1c 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -45,7 +45,7 @@ struct msm_clock { struct clock_event_device clockevent; struct clocksource clocksource; struct irqaction irq; - uint32_t regbase; + void __iomem *regbase; uint32_t freq; uint32_t shift; }; diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c new file mode 100644 index 0000000000000000000000000000000000000000..fcb0b9f256841a291616d85cd1099a09bd8bc088 --- /dev/null +++ b/arch/arm/mach-msm/vreg.c @@ -0,0 +1,143 @@ +/* arch/arm/mach-msm/vreg.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "proc_comm.h" + +struct vreg { + const char *name; + unsigned id; +}; + +#define VREG(_name, _id) { .name = _name, .id = _id, } + +static struct vreg vregs[] = { + VREG("msma", 0), + VREG("msmp", 1), + VREG("msme1", 2), + VREG("msmc1", 3), + VREG("msmc2", 4), + VREG("gp3", 5), + VREG("msme2", 6), + VREG("gp4", 7), + VREG("gp1", 8), + VREG("tcxo", 9), + VREG("pa", 10), + VREG("rftx", 11), + VREG("rfrx1", 12), + VREG("rfrx2", 13), + VREG("synt", 14), + VREG("wlan", 15), + VREG("usb", 16), + VREG("boost", 17), + VREG("mmc", 18), + VREG("ruim", 19), + VREG("msmc0", 20), + VREG("gp2", 21), + VREG("gp5", 22), + VREG("gp6", 23), + VREG("rf", 24), + VREG("rf_vco", 26), + VREG("mpll", 27), + VREG("s2", 28), + VREG("s3", 29), + VREG("rfubm", 30), + VREG("ncp", 31), +}; + +struct vreg *vreg_get(struct device *dev, const char *id) +{ + int n; + for (n = 0; n < ARRAY_SIZE(vregs); n++) { + if (!strcmp(vregs[n].name, id)) + return vregs + n; + } + return 0; +} + +void vreg_put(struct vreg *vreg) +{ +} + +int vreg_enable(struct vreg *vreg) +{ + unsigned id = vreg->id; + unsigned enable = 1; + return msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable); +} + +void vreg_disable(struct vreg *vreg) +{ + unsigned id = vreg->id; + unsigned enable = 0; + msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable); +} + +int vreg_set_level(struct vreg *vreg, unsigned mv) +{ + unsigned id = vreg->id; + return msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv); +} + +#if defined(CONFIG_DEBUG_FS) + +static int vreg_debug_set(void *data, u64 val) +{ + struct vreg *vreg = data; + switch (val) { + case 0: + vreg_disable(vreg); + break; + case 1: + vreg_enable(vreg); + break; + default: + vreg_set_level(vreg, val); + break; + } + return 0; +} + +static int vreg_debug_get(void *data, u64 *val) +{ + return -ENOSYS; +} + 
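The debugfs hooks above exercise the same calls a consumer would use; a minimal sketch of the vreg API from a driver's side (the "wlan" rail name and the 2600 mV level are illustrative assumptions, and error handling is kept deliberately simple):

	/* Illustrative only: how a driver might power a peripheral rail
	 * using the vreg_* helpers declared in mach/vreg.h. */
	static int example_power_on_wlan(struct device *dev)
	{
		struct vreg *vreg;
		int rc;

		vreg = vreg_get(dev, "wlan");
		if (!vreg)
			return -ENODEV;

		rc = vreg_set_level(vreg, 2600);	/* millivolts */
		if (rc)
			return rc;

		return vreg_enable(vreg);
	}
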
+DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n"); + +static int __init vreg_debug_init(void) +{ + struct dentry *dent; + int n; + + dent = debugfs_create_dir("vreg", 0); + if (IS_ERR(dent)) + return 0; + + for (n = 0; n < ARRAY_SIZE(vregs); n++) + (void) debugfs_create_file(vregs[n].name, 0644, + dent, vregs + n, &vreg_fops); + + return 0; +} + +device_initcall(vreg_debug_init); +#endif diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c index d354e0fe4477ad450668b16e462258e26b2b4853..c40fc378a251244bce6e6c0ad9f0e177c7326a75 100644 --- a/arch/arm/mach-omap2/irq.c +++ b/arch/arm/mach-omap2/irq.c @@ -119,7 +119,7 @@ static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank) void __init omap_init_irq(void) { - unsigned long nr_irqs = 0; + unsigned long nr_of_irqs = 0; unsigned int nr_banks = 0; int i; @@ -133,14 +133,14 @@ void __init omap_init_irq(void) omap_irq_bank_init_one(bank); - nr_irqs += bank->nr_irqs; + nr_of_irqs += bank->nr_irqs; nr_banks++; } printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n", - nr_irqs, nr_banks, nr_banks > 1 ? "s" : ""); + nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : ""); - for (i = 0; i < nr_irqs; i++) { + for (i = 0; i < nr_of_irqs; i++) { set_irq_chip(i, &omap_irq_chip); set_irq_handler(i, handle_level_irq); set_irq_flags(i, IRQF_VALID); diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h index eb4b190b6657256898dfbc2da475a32a98ee6f4d..eb35fca9aea527bde9992f80917fd69cc174c346 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h @@ -4,6 +4,43 @@ #include #include +struct pxa3xx_nand_timing { + unsigned int tCH; /* Enable signal hold time */ + unsigned int tCS; /* Enable signal setup time */ + unsigned int tWH; /* ND_nWE high duration */ + unsigned int tWP; /* ND_nWE pulse time */ + unsigned int tRH; /* ND_nRE high duration */ + unsigned int tRP; /* ND_nRE pulse width */ + unsigned int tR; /* ND_nWE high to ND_nRE low for read */ + unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ + unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ +}; + +struct pxa3xx_nand_cmdset { + uint16_t read1; + uint16_t read2; + uint16_t program; + uint16_t read_status; + uint16_t read_id; + uint16_t erase; + uint16_t reset; + uint16_t lock; + uint16_t unlock; + uint16_t lock_status; +}; + +struct pxa3xx_nand_flash { + const struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ + const struct pxa3xx_nand_cmdset *cmdset; + + uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */ + uint32_t page_size; /* Page size in bytes (PAGE_SZ) */ + uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */ + uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */ + uint32_t num_blocks; /* Number of physical blocks in Flash */ + uint32_t chip_id; +}; + struct pxa3xx_nand_platform_data { /* the data flash bus is shared between the Static Memory @@ -12,8 +49,11 @@ struct pxa3xx_nand_platform_data { */ int enable_arbiter; - struct mtd_partition *parts; - unsigned int nr_parts; + const struct mtd_partition *parts; + unsigned int nr_parts; + + const struct pxa3xx_nand_flash * flash; + size_t num_flash; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h index a72803f0461bbe4320cebcaad410d2c17e5f685f..8bce6d8615b9ddec02856941fd1e700d79c505de 100644 --- 
a/arch/arm/mach-pxa/include/mach/tosa.h +++ b/arch/arm/mach-pxa/include/mach/tosa.h @@ -59,8 +59,6 @@ * TC6393XB GPIOs */ #define TOSA_TC6393XB_GPIO_BASE (NR_BUILTIN_GPIO + 2 * 12) -#define TOSA_TC6393XB_GPIO(i) (TOSA_TC6393XB_GPIO_BASE + (i)) -#define TOSA_TC6393XB_GPIO_BIT(gpio) (1 << (gpio - TOSA_TC6393XB_GPIO_BASE)) #define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0) #define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1) diff --git a/arch/arm/mach-pxa/include/mach/zylonite.h b/arch/arm/mach-pxa/include/mach/zylonite.h index 0d35ca04731e485fa655c5dccd087c59fd58fb0d..bf6785adccf45a02e725422994d17d77138d2bf9 100644 --- a/arch/arm/mach-pxa/include/mach/zylonite.h +++ b/arch/arm/mach-pxa/include/mach/zylonite.h @@ -30,7 +30,7 @@ extern void zylonite_pxa300_init(void); static inline void zylonite_pxa300_init(void) { if (cpu_is_pxa300() || cpu_is_pxa310()) - panic("%s: PXA300/PXA310 not supported\n", __FUNCTION__); + panic("%s: PXA300/PXA310 not supported\n", __func__); } #endif @@ -40,7 +40,7 @@ extern void zylonite_pxa320_init(void); static inline void zylonite_pxa320_init(void) { if (cpu_is_pxa320()) - panic("%s: PXA320 not supported\n", __FUNCTION__); + panic("%s: PXA320 not supported\n", __func__); } #endif diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 130e37e4ebdd2292aef62443a8614e89d8dd92bd..a6c4694359cabfdde423f8e669cac9dfeaca9163 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -706,16 +706,39 @@ static struct tmio_nand_data tosa_tc6393xb_nand_config = { .badblock_pattern = &tosa_tc6393xb_nand_bbt, }; -static struct tc6393xb_platform_data tosa_tc6393xb_setup = { +static int tosa_tc6393xb_setup(struct platform_device *dev) +{ + int rc; + + rc = gpio_request(TOSA_GPIO_CARD_VCC_ON, "CARD_VCC_ON"); + if (rc) + goto err_req; + + rc = gpio_direction_output(TOSA_GPIO_CARD_VCC_ON, 1); + if (rc) + goto err_dir; + + return rc; + +err_dir: + gpio_free(TOSA_GPIO_CARD_VCC_ON); +err_req: + return rc; +} + +static void tosa_tc6393xb_teardown(struct platform_device *dev) +{ + gpio_free(TOSA_GPIO_CARD_VCC_ON); +} + +static struct tc6393xb_platform_data tosa_tc6393xb_data = { .scr_pll2cr = 0x0cc1, .scr_gper = 0x3300, - .scr_gpo_dsr = - TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), - .scr_gpo_doecr = - TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), .irq_base = IRQ_BOARD_START, .gpio_base = TOSA_TC6393XB_GPIO_BASE, + .setup = tosa_tc6393xb_setup, + .teardown = tosa_tc6393xb_teardown, .enable = tosa_tc6393xb_enable, .disable = tosa_tc6393xb_disable, @@ -723,6 +746,8 @@ static struct tc6393xb_platform_data tosa_tc6393xb_setup = { .resume = tosa_tc6393xb_resume, .nand_data = &tosa_tc6393xb_nand_config, + + .resume_restore = 1, }; @@ -730,7 +755,7 @@ static struct platform_device tc6393xb_device = { .name = "tc6393xb", .id = -1, .dev = { - .platform_data = &tosa_tc6393xb_setup, + .platform_data = &tosa_tc6393xb_data, }, .num_resources = ARRAY_SIZE(tc6393xb_resources), .resource = tc6393xb_resources, diff --git a/arch/arm/mach-sa1100/include/mach/ide.h b/arch/arm/mach-sa1100/include/mach/ide.h deleted file mode 100644 index 4c99c8f5e6172dd9587fdf318e813712aaf730d7..0000000000000000000000000000000000000000 --- a/arch/arm/mach-sa1100/include/mach/ide.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * arch/arm/mach-sa1100/include/mach/ide.h - * - * Copyright (c) 1998 Hugo Fiennes & Nicolas Pitre - * - * 18-aug-2000: Cleanup by Erik Mouw (J.A.K.Mouw@its.tudelft.nl) - * Get rid of the special ide_init_hwif_ports() functions - * and make a generalised function that can be 
used by all - * architectures. - */ - -#include -#include -#include - -#error "This code is broken and needs update to match with current ide support" - - -/* - * Set up a hw structure for a specified data port, control port and IRQ. - * This should follow whatever the default interface uses. - */ -static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, - unsigned long ctrl_port, int *irq) -{ - unsigned long reg = data_port; - int i; - int regincr = 1; - - /* The Empeg board has the first two address lines unused */ - if (machine_is_empeg()) - regincr = 1 << 2; - - /* The LART doesn't use A0 for IDE */ - if (machine_is_lart()) - regincr = 1 << 1; - - memset(hw, 0, sizeof(*hw)); - - for (i = 0; i <= 7; i++) { - hw->io_ports_array[i] = reg; - reg += regincr; - } - - hw->io_ports.ctl_addr = ctrl_port; - - if (irq) - *irq = 0; -} - -/* - * This registers the standard ports for this architecture with the IDE - * driver. - */ -static __inline__ void -ide_init_default_hwifs(void) -{ - if (machine_is_lart()) { -#ifdef CONFIG_SA1100_LART - hw_regs_t hw; - - /* Enable GPIO as interrupt line */ - GPDR &= ~LART_GPIO_IDE; - set_irq_type(LART_IRQ_IDE, IRQ_TYPE_EDGE_RISING); - - /* set PCMCIA interface timing */ - MECR = 0x00060006; - - /* init the interface */ - ide_init_hwif_ports(&hw, PCMCIA_IO_0_BASE + 0x0000, PCMCIA_IO_0_BASE + 0x1000, NULL); - hw.irq = LART_IRQ_IDE; - ide_register_hw(&hw); -#endif - } -} diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index d1193884d76d9ce4540b5d2a9d11e94e0233536b..ab5f7a21350b3af41bd6a227d1920f09c8091ed1 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -400,9 +400,9 @@ config CPU_FEROCEON_OLD_ID # ARMv6 config CPU_V6 bool "Support ARM V6 processor" - depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM7X00A || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 + depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 default y if ARCH_MX3 - default y if ARCH_MSM7X00A + default y if ARCH_MSM select CPU_32v6 select CPU_ABRT_EV6 select CPU_PABRT_NOIFAR diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h new file mode 100644 index 0000000000000000000000000000000000000000..2b972df22d122f62f2597b8fd7656b741ead746b --- /dev/null +++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h @@ -0,0 +1,27 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ */ + +#ifndef __ASM_ARCH_NAND_H +#define __ASM_ARCH_NAND_H + +struct mxc_nand_platform_data { + int width; /* data bus width in bytes */ + int hw_ecc; /* 0 if supress hardware ECC */ +}; +#endif /* __ASM_ARCH_NAND_H */ diff --git a/arch/arm/plat-omap/include/mach/onenand.h b/arch/arm/plat-omap/include/mach/onenand.h index d57f20226b28733743c35455bb798836ba34f299..4649d302c263723abb48c6c33579d39f96813e4f 100644 --- a/arch/arm/plat-omap/include/mach/onenand.h +++ b/arch/arm/plat-omap/include/mach/onenand.h @@ -16,6 +16,10 @@ struct omap_onenand_platform_data { int gpio_irq; struct mtd_partition *parts; int nr_parts; - int (*onenand_setup)(void __iomem *); + int (*onenand_setup)(void __iomem *, int freq); int dma_channel; }; + +int omap2_onenand_rephase(void); + +#define ONENAND_MAX_PARTITIONS 8 diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 7c239a916275b6b66b326e7fc93c6207bdbaf270..33a5b2969eb4addf24e36a59159d72488530dc0b 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -72,6 +72,8 @@ config GENERIC_BUG source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "System Type and features" source "kernel/time/Kconfig" diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h index 294b25f9323dda6a4885de1a6b874b4a6a3bb825..4442f8d2d4239a09470e04583aa9f168583246d9 100644 --- a/arch/avr32/include/asm/thread_info.h +++ b/arch/avr32/include/asm/thread_info.h @@ -96,6 +96,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP) +#define _TIF_FREEZE (1 << TIF_FREEZE) /* Note: The masks below must never span more than 16 bits! */ diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index c36a6d59d6f0785e900a7e905189e577bd4693bc..310477ba1bbf170a47af4e68bfc4bfa26d836a8d 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c @@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev) struct eic *eic; struct resource *regs; unsigned int i; - unsigned int nr_irqs; + unsigned int nr_of_irqs; unsigned int int_irq; int ret; u32 pattern; @@ -224,7 +224,7 @@ static int __init eic_probe(struct platform_device *pdev) eic_writel(eic, IDR, ~0UL); eic_writel(eic, MODE, ~0UL); pattern = eic_readl(eic, MODE); - nr_irqs = fls(pattern); + nr_of_irqs = fls(pattern); /* Trigger on low level unless overridden by driver */ eic_writel(eic, EDGE, 0UL); @@ -232,7 +232,7 @@ static int __init eic_probe(struct platform_device *pdev) eic->chip = &eic_chip; - for (i = 0; i < nr_irqs; i++) { + for (i = 0; i < nr_of_irqs; i++) { set_irq_chip_and_handler(eic->first_irq + i, &eic_chip, handle_level_irq); set_irq_chip_data(eic->first_irq + i, eic); @@ -256,7 +256,7 @@ static int __init eic_probe(struct platform_device *pdev) eic->regs, int_irq); dev_info(&pdev->dev, "Handling %u external IRQs, starting with IRQ %u\n", - nr_irqs, eic->first_irq); + nr_of_irqs, eic->first_irq); return 0; diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 8102c79aaa94044369612ab3a8dc3bb6e33aa807..29e71ed6b8a7fb9c5e4b8381244a4c3d5b1d47db 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -64,8 +64,11 @@ config HARDWARE_PM depends on OPROFILE source "init/Kconfig" + source "kernel/Kconfig.preempt" +source "kernel/Kconfig.freezer" + menu "Blackfin Processor Options" comment "Processor and Board Settings" diff --git a/arch/cris/Kconfig 
b/arch/cris/Kconfig index 9389d38f222f2f749497b106872278a7da5bdd0b..07335e719bf835d7ad5e88a6b844f07210b4b6ba 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -62,6 +62,8 @@ config HZ source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "General setup" source "fs/Kconfig.binfmt" diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c index c9aa3904be05610f35425ced24c0a88a93383350..3bdfaf43390ce00b03ead8226136b3849a63e1e6 100644 --- a/arch/cris/arch-v10/drivers/ds1302.c +++ b/arch/cris/arch-v10/drivers/ds1302.c @@ -215,12 +215,12 @@ get_rtc_time(struct rtc_time *rtc_tm) local_irq_restore(flags); - BCD_TO_BIN(rtc_tm->tm_sec); - BCD_TO_BIN(rtc_tm->tm_min); - BCD_TO_BIN(rtc_tm->tm_hour); - BCD_TO_BIN(rtc_tm->tm_mday); - BCD_TO_BIN(rtc_tm->tm_mon); - BCD_TO_BIN(rtc_tm->tm_year); + rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); + rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); + rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); + rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); + rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); + rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); /* * Account for differences between how the RTC uses the values @@ -295,12 +295,12 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, else yrs -= 1900; /* RTC (70, 71, ... 99) */ - BIN_TO_BCD(sec); - BIN_TO_BCD(min); - BIN_TO_BCD(hrs); - BIN_TO_BCD(day); - BIN_TO_BCD(mon); - BIN_TO_BCD(yrs); + sec = bin2bcd(sec); + min = bin2bcd(min); + hrs = bin2bcd(hrs); + day = bin2bcd(day); + mon = bin2bcd(mon); + yrs = bin2bcd(yrs); local_irq_save(flags); CMOS_WRITE(yrs, RTC_YEAR); diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index 8769dc914073804ce0c75240e0aa6b60fc7c602a..1e90c1a9c849bd4453b462e3b611808171e5c475 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c @@ -122,7 +122,7 @@ get_rtc_time(struct rtc_time *tm) "information is no longer guaranteed!\n", PCF8563_NAME); } - tm->tm_year = BCD_TO_BIN(tm->tm_year) + + tm->tm_year = bcd2bin(tm->tm_year) + ((tm->tm_mon & 0x80) ? 100 : 0); tm->tm_sec &= 0x7F; tm->tm_min &= 0x7F; @@ -131,11 +131,11 @@ get_rtc_time(struct rtc_time *tm) tm->tm_wday &= 0x07; /* Not coded in BCD. */ tm->tm_mon &= 0x1F; - BCD_TO_BIN(tm->tm_sec); - BCD_TO_BIN(tm->tm_min); - BCD_TO_BIN(tm->tm_hour); - BCD_TO_BIN(tm->tm_mday); - BCD_TO_BIN(tm->tm_mon); + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon); tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */ } @@ -282,12 +282,12 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, century = (tm.tm_year >= 2000) ? 
0x80 : 0; tm.tm_year = tm.tm_year % 100; - BIN_TO_BCD(tm.tm_year); - BIN_TO_BCD(tm.tm_mon); - BIN_TO_BCD(tm.tm_mday); - BIN_TO_BCD(tm.tm_hour); - BIN_TO_BCD(tm.tm_min); - BIN_TO_BCD(tm.tm_sec); + tm.tm_year = bin2bcd(tm.tm_year); + tm.tm_mon = bin2bcd(tm.tm_mon); + tm.tm_mday = bin2bcd(tm.tm_mday); + tm.tm_hour = bin2bcd(tm.tm_hour); + tm.tm_min = bin2bcd(tm.tm_min); + tm.tm_sec = bin2bcd(tm.tm_sec); tm.tm_mon |= century; mutex_lock(&rtc_lock); diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c index f263ab571221cb66e5fc200659069ea2291450d7..f4478506e52ca1df58b52180ffd871e77c51e726 100644 --- a/arch/cris/arch-v32/drivers/pcf8563.c +++ b/arch/cris/arch-v32/drivers/pcf8563.c @@ -118,7 +118,7 @@ get_rtc_time(struct rtc_time *tm) "information is no longer guaranteed!\n", PCF8563_NAME); } - tm->tm_year = BCD_TO_BIN(tm->tm_year) + + tm->tm_year = bcd2bin(tm->tm_year) + ((tm->tm_mon & 0x80) ? 100 : 0); tm->tm_sec &= 0x7F; tm->tm_min &= 0x7F; @@ -127,11 +127,11 @@ get_rtc_time(struct rtc_time *tm) tm->tm_wday &= 0x07; /* Not coded in BCD. */ tm->tm_mon &= 0x1F; - BCD_TO_BIN(tm->tm_sec); - BCD_TO_BIN(tm->tm_min); - BCD_TO_BIN(tm->tm_hour); - BCD_TO_BIN(tm->tm_mday); - BCD_TO_BIN(tm->tm_mon); + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon); tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */ } @@ -279,12 +279,12 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, century = (tm.tm_year >= 2000) ? 0x80 : 0; tm.tm_year = tm.tm_year % 100; - BIN_TO_BCD(tm.tm_year); - BIN_TO_BCD(tm.tm_mon); - BIN_TO_BCD(tm.tm_mday); - BIN_TO_BCD(tm.tm_hour); - BIN_TO_BCD(tm.tm_min); - BIN_TO_BCD(tm.tm_sec); + tm.tm_year = bin2bcd(tm.tm_year); + tm.tm_mon = bin2bcd(tm.tm_mon); + tm.tm_mday = bin2bcd(tm.tm_mday); + tm.tm_hour = bin2bcd(tm.tm_hour); + tm.tm_min = bin2bcd(tm.tm_min); + tm.tm_sec = bin2bcd(tm.tm_sec); tm.tm_mon |= century; mutex_lock(&rtc_lock); diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index ff4c6aa75defa157e1e4bd436cf47d4c4526e415..074fe7dea96bbaa44c27f6ef513253552646a756 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -127,7 +127,7 @@ int set_rtc_mmss(unsigned long nowtime) return 0; cmos_minutes = CMOS_READ(RTC_MINUTES); - BCD_TO_BIN(cmos_minutes); + cmos_minutes = bcd2bin(cmos_minutes); /* * since we're only adjusting minutes and seconds, @@ -142,8 +142,8 @@ int set_rtc_mmss(unsigned long nowtime) real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); + real_seconds = bin2bcd(real_seconds); + real_minutes = bin2bcd(real_minutes); CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { @@ -170,12 +170,12 @@ get_cmos_time(void) mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); + sec = bcd2bin(sec); + min = bcd2bin(min); + hour = bcd2bin(hour); + day = bcd2bin(day); + mon = bcd2bin(mon); + year = bcd2bin(year); if ((year += 1900) < 1970) year += 100; diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index a5aac1b075628b8a30004d52bb73d2e1ce7d978f..9d1552a9ee2c88ddb40bc7d70dea316f725d2843 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -66,6 +66,8 @@ mainmenu "Fujitsu FR-V Kernel Configuration" source "init/Kconfig" +source 
"kernel/Kconfig.freezer" + menu "Fujitsu FR-V system setup" diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index c7966746fbfec582938b77a37baaed384d1b0ec2..bd1995403c67d0f9b37b34674a6c80ab7ee2a76b 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -90,6 +90,8 @@ config HZ source "init/Kconfig" +source "kernel/Kconfig.freezer" + source "arch/h8300/Kconfig.cpu" menu "Executable file formats" diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index aafd4d322ec3e2e8c6767aac9ea125587ff4b322..700014d2155fd8d82a4aeadb54c4b3ad682855bb 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void) TIF_NEED_RESCHED */ #define TIF_MEMDIE 4 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ +#define TIF_FREEZE 16 /* is freezing for suspend */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< #include +#include #include #include +/* Stores the physical address of elf header of crash image. */ +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; + /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 51b75cea701866629449f7945ddef10079faa7ab..efaff15d8cf1dc9cad35a767a53b3865d51037b4 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -1335,7 +1335,7 @@ kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) } #endif -#ifdef CONFIG_PROC_VMCORE +#ifdef CONFIG_CRASH_DUMP /* locate the size find a the descriptor at a certain address */ unsigned long __init vmcore_find_descriptor_size (unsigned long address) diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index de636b215677e801c23076d74f6d5a2d29e68efb..916ba898237f27a6eca8d5b79cc92cce86b716f1 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -352,7 +352,7 @@ reserve_memory (void) } #endif -#ifdef CONFIG_PROC_VMCORE +#ifdef CONFIG_CRASH_KERNEL if (reserve_elfcorehdr(&rsvd_region[n].start, &rsvd_region[n].end) == 0) n++; @@ -478,7 +478,12 @@ static __init int setup_nomca(char *s) } early_param("nomca", setup_nomca); -#ifdef CONFIG_PROC_VMCORE +/* + * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by + * is_kdump_kernel() to determine if we are booting after a panic. Hence + * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. + */ +#ifdef CONFIG_CRASH_DUMP /* elfcorehdr= specifies the location of elf core header * stored by the crashed kernel. */ @@ -502,11 +507,11 @@ int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end) * to work properly. 
*/ - if (elfcorehdr_addr >= ELFCORE_ADDR_MAX) + if (!is_vmcore_usable()) return -EINVAL; if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) { - elfcorehdr_addr = ELFCORE_ADDR_MAX; + vmcore_unusable(); return -EINVAL; } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index f482a9098e32aadec0fc194befc4906a6be7f6b2..054bcd9439aa887a724fc031ec91701b7fefb218 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -700,23 +700,6 @@ int arch_add_memory(int nid, u64 start, u64 size) return ret; } -#ifdef CONFIG_MEMORY_HOTREMOVE -int remove_memory(u64 start, u64 size) -{ - unsigned long start_pfn, end_pfn; - unsigned long timeout = 120 * HZ; - int ret; - start_pfn = start >> PAGE_SHIFT; - end_pfn = start_pfn + (size >> PAGE_SHIFT); - ret = offline_pages(start_pfn, end_pfn, timeout); - if (ret) - goto out; - /* we can free mem_map at this point */ -out: - return ret; -} -EXPORT_SYMBOL_GPL(remove_memory); -#endif /* CONFIG_MEMORY_HOTREMOVE */ #endif /* diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 7545037a86254f1f5da925f9b96f80fafcfc1a33..211fcfd115f91f1e6ec169e8abd9849c257b975f 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -614,12 +614,17 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus) * vector to get the base address. */ int -pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma) +pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) { unsigned long size = vma->vm_end - vma->vm_start; pgprot_t prot; char *addr; + /* We only support mmap'ing of legacy memory space */ + if (mmap_state != pci_mmap_mem) + return -ENOSYS; + /* * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt * for more details. diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 00289c178f895a6ad62a7fb34603af08d52bc961..dbaed4a638153bc43c5fa4d6e138039bb5cca8df 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -42,6 +42,8 @@ config HZ source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Processor type and features" diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index fc2994811f150c991986b6294538ca5b9c6a64ab..39cb6da72dcbef3358d04e2b419a150f88c299ba 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -40,6 +40,7 @@ */ #include +#include #include #include #include diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 677c93a490f6f49501e4c7d2e375fd42bb4dabc5..836fb66f080dcbadeb5f6d96fba8f01679d85d02 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -62,6 +62,8 @@ mainmenu "Linux/68k Kernel Configuration" source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Platform dependent setup" config EISA diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c index 808c9018b115022dd2ba8dee88a070fc003448ce..c50bec8aabb1983e3517857b5abafa75f91b7ace 100644 --- a/arch/m68k/bvme6000/rtc.c +++ b/arch/m68k/bvme6000/rtc.c @@ -18,7 +18,6 @@ #include #include #include /* For struct rtc_time and ioctls, etc */ -#include #include #include diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index 0a8998315e5ed9fcdada66d87c5ee1127e35b9ac..76b66feb74df82d42b21a4b8ac67d6df0f498812 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -75,6 +75,8 @@ config NO_IOPORT source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Processor type and features" choice diff --git a/arch/m68knommu/include/asm/thread_info.h b/arch/m68knommu/include/asm/thread_info.h index 
0c9bc095f3f0bc3cae753f788f72786a67b96512..82529f424ea3d567d0c1a9a2991b9591f7dacd85 100644 --- a/arch/m68knommu/include/asm/thread_info.h +++ b/arch/m68knommu/include/asm/thread_info.h @@ -84,12 +84,14 @@ static inline struct thread_info *current_thread_info(void) #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 4 +#define TIF_FREEZE 16 /* is freezing for suspend */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<control = 0x40; - year = BCD2BIN(m48t37_base->year); - year += BCD2BIN(m48t37_base->century) * 100; + year = bcd2bin(m48t37_base->year); + year += bcd2bin(m48t37_base->century) * 100; - month = BCD2BIN(m48t37_base->month); - day = BCD2BIN(m48t37_base->date); - hour = BCD2BIN(m48t37_base->hour); - min = BCD2BIN(m48t37_base->min); - sec = BCD2BIN(m48t37_base->sec); + month = bcd2bin(m48t37_base->month); + day = bcd2bin(m48t37_base->date); + hour = bcd2bin(m48t37_base->hour); + min = bcd2bin(m48t37_base->min); + sec = bcd2bin(m48t37_base->sec); /* Start the update to the time again */ m48t37_base->control = 0x00; @@ -113,22 +113,22 @@ int rtc_mips_set_time(unsigned long tim) m48t37_base->control = 0x80; /* year */ - m48t37_base->year = BIN2BCD(tm.tm_year % 100); - m48t37_base->century = BIN2BCD(tm.tm_year / 100); + m48t37_base->year = bin2bcd(tm.tm_year % 100); + m48t37_base->century = bin2bcd(tm.tm_year / 100); /* month */ - m48t37_base->month = BIN2BCD(tm.tm_mon); + m48t37_base->month = bin2bcd(tm.tm_mon); /* day */ - m48t37_base->date = BIN2BCD(tm.tm_mday); + m48t37_base->date = bin2bcd(tm.tm_mday); /* hour/min/sec */ - m48t37_base->hour = BIN2BCD(tm.tm_hour); - m48t37_base->min = BIN2BCD(tm.tm_min); - m48t37_base->sec = BIN2BCD(tm.tm_sec); + m48t37_base->hour = bin2bcd(tm.tm_hour); + m48t37_base->min = bin2bcd(tm.tm_min); + m48t37_base->sec = bin2bcd(tm.tm_sec); /* day of week -- not really used, but let's keep it up-to-date */ - m48t37_base->day = BIN2BCD(tm.tm_wday + 1); + m48t37_base->day = bin2bcd(tm.tm_wday + 1); /* disable writing */ m48t37_base->control = 0x00; diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c index 26fbff4c15b1ece152870eef2bdf152cf0e15cc8..b732600b47f5e5e8ad7908e61cd55d22d9f4f6f2 100644 --- a/arch/mips/sibyte/swarm/rtc_m41t81.c +++ b/arch/mips/sibyte/swarm/rtc_m41t81.c @@ -156,32 +156,32 @@ int m41t81_set_time(unsigned long t) */ spin_lock_irqsave(&rtc_lock, flags); - tm.tm_sec = BIN2BCD(tm.tm_sec); + tm.tm_sec = bin2bcd(tm.tm_sec); m41t81_write(M41T81REG_SC, tm.tm_sec); - tm.tm_min = BIN2BCD(tm.tm_min); + tm.tm_min = bin2bcd(tm.tm_min); m41t81_write(M41T81REG_MN, tm.tm_min); - tm.tm_hour = BIN2BCD(tm.tm_hour); + tm.tm_hour = bin2bcd(tm.tm_hour); tm.tm_hour = (tm.tm_hour & 0x3f) | (m41t81_read(M41T81REG_HR) & 0xc0); m41t81_write(M41T81REG_HR, tm.tm_hour); /* tm_wday starts from 0 to 6 */ if (tm.tm_wday == 0) tm.tm_wday = 7; - tm.tm_wday = BIN2BCD(tm.tm_wday); + tm.tm_wday = bin2bcd(tm.tm_wday); m41t81_write(M41T81REG_DY, tm.tm_wday); - tm.tm_mday = BIN2BCD(tm.tm_mday); + tm.tm_mday = bin2bcd(tm.tm_mday); m41t81_write(M41T81REG_DT, tm.tm_mday); /* tm_mon starts from 0, *ick* */ tm.tm_mon ++; - tm.tm_mon = BIN2BCD(tm.tm_mon); + tm.tm_mon = bin2bcd(tm.tm_mon); m41t81_write(M41T81REG_MO, tm.tm_mon); /* we don't do century, everything is beyond 2000 */ tm.tm_year %= 100; - tm.tm_year = BIN2BCD(tm.tm_year); + tm.tm_year = bin2bcd(tm.tm_year); m41t81_write(M41T81REG_YR, tm.tm_year); spin_unlock_irqrestore(&rtc_lock, flags); @@ -209,12 +209,12 @@ unsigned long 
m41t81_get_time(void) year = m41t81_read(M41T81REG_YR); spin_unlock_irqrestore(&rtc_lock, flags); - sec = BCD2BIN(sec); - min = BCD2BIN(min); - hour = BCD2BIN(hour); - day = BCD2BIN(day); - mon = BCD2BIN(mon); - year = BCD2BIN(year); + sec = bcd2bin(sec); + min = bcd2bin(min); + hour = bcd2bin(hour); + day = bcd2bin(day); + mon = bcd2bin(mon); + year = bcd2bin(year); year += 2000; diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c index ff3e5dabb348edda9707e69b5f18c77a0a69790c..4438b2195c4475d8f1b659422d354de78d07e132 100644 --- a/arch/mips/sibyte/swarm/rtc_xicor1241.c +++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c @@ -124,18 +124,18 @@ int xicor_set_time(unsigned long t) xicor_write(X1241REG_SR, X1241REG_SR_WEL | X1241REG_SR_RWEL); /* trivial ones */ - tm.tm_sec = BIN2BCD(tm.tm_sec); + tm.tm_sec = bin2bcd(tm.tm_sec); xicor_write(X1241REG_SC, tm.tm_sec); - tm.tm_min = BIN2BCD(tm.tm_min); + tm.tm_min = bin2bcd(tm.tm_min); xicor_write(X1241REG_MN, tm.tm_min); - tm.tm_mday = BIN2BCD(tm.tm_mday); + tm.tm_mday = bin2bcd(tm.tm_mday); xicor_write(X1241REG_DT, tm.tm_mday); /* tm_mon starts from 0, *ick* */ tm.tm_mon ++; - tm.tm_mon = BIN2BCD(tm.tm_mon); + tm.tm_mon = bin2bcd(tm.tm_mon); xicor_write(X1241REG_MO, tm.tm_mon); /* year is split */ @@ -148,7 +148,7 @@ int xicor_set_time(unsigned long t) tmp = xicor_read(X1241REG_HR); if (tmp & X1241REG_HR_MIL) { /* 24 hour format */ - tm.tm_hour = BIN2BCD(tm.tm_hour); + tm.tm_hour = bin2bcd(tm.tm_hour); tmp = (tmp & ~0x3f) | (tm.tm_hour & 0x3f); } else { /* 12 hour format, with 0x2 for pm */ @@ -157,7 +157,7 @@ int xicor_set_time(unsigned long t) tmp |= 0x20; tm.tm_hour -= 12; } - tm.tm_hour = BIN2BCD(tm.tm_hour); + tm.tm_hour = bin2bcd(tm.tm_hour); tmp |= tm.tm_hour; } xicor_write(X1241REG_HR, tmp); @@ -191,13 +191,13 @@ unsigned long xicor_get_time(void) y2k = xicor_read(X1241REG_Y2K); spin_unlock_irqrestore(&rtc_lock, flags); - sec = BCD2BIN(sec); - min = BCD2BIN(min); - hour = BCD2BIN(hour); - day = BCD2BIN(day); - mon = BCD2BIN(mon); - year = BCD2BIN(year); - y2k = BCD2BIN(y2k); + sec = bcd2bin(sec); + min = bcd2bin(min); + hour = bcd2bin(hour); + day = bcd2bin(day); + mon = bcd2bin(mon); + year = bcd2bin(year); + y2k = bcd2bin(y2k); year += (y2k * 100); diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index dd557c9cf001be6226441b78cb84753f5e50cb15..9a9f433588792f5030796b2686193f4983c98ce9 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -68,6 +68,8 @@ mainmenu "Matsushita MN10300/AM33 Kernel Configuration" source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Matsushita MN10300 system setup" diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c index 042f792d843052c7396ce76641a5cc8b472e7215..7978470b5749352e3b1f81beb018648be65ac786 100644 --- a/arch/mn10300/kernel/rtc.c +++ b/arch/mn10300/kernel/rtc.c @@ -67,7 +67,7 @@ static int set_rtc_mmss(unsigned long nowtime) cmos_minutes = CMOS_READ(RTC_MINUTES); if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - BCD_TO_BIN(cmos_minutes); + cmos_minutes = bcd2bin(cmos_minutes); /* * since we're only adjusting minutes and seconds, @@ -84,8 +84,8 @@ static int set_rtc_mmss(unsigned long nowtime) if (abs(real_minutes - cmos_minutes) < 30) { if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); + real_seconds = bin2bcd(real_seconds); + real_minutes = bin2bcd(real_minutes); } CMOS_WRITE(real_seconds, RTC_SECONDS); CMOS_WRITE(real_minutes, RTC_MINUTES); diff --git 
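
For reference, the BCD2BIN()/BIN2BCD() call sites above are being switched to the generic bcd2bin()/bin2bcd() helpers from <linux/bcd.h>. A simplified sketch of what those helpers do (not the exact kernel source):

  /* Simplified equivalents of the helpers declared in <linux/bcd.h>. */
  unsigned bcd2bin(unsigned char val)
  {
          return (val & 0x0f) + (val >> 4) * 10;
  }

  unsigned char bin2bcd(unsigned val)
  {
          return ((val / 10) << 4) + val % 10;
  }

  /* Example: an RTC register holding 0x59 decodes to 59 seconds,
   * and writing 59 seconds back encodes to 0x59.
   */
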
a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8313fccced5e1553f6f3d7ff484d897d2d20b812..644a70b1b04e4fd9ecf16d2e73d6e22ea5227115 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -9,6 +9,8 @@ config PARISC def_bool y select HAVE_IDE select HAVE_OPROFILE + select RTC_CLASS + select RTC_DRV_PARISC help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, @@ -90,6 +92,8 @@ config ARCH_MAY_HAVE_PC_FDC source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Processor type and features" diff --git a/include/asm-parisc/Kbuild b/arch/parisc/include/asm/Kbuild similarity index 100% rename from include/asm-parisc/Kbuild rename to arch/parisc/include/asm/Kbuild diff --git a/include/asm-parisc/agp.h b/arch/parisc/include/asm/agp.h similarity index 100% rename from include/asm-parisc/agp.h rename to arch/parisc/include/asm/agp.h diff --git a/include/asm-parisc/asmregs.h b/arch/parisc/include/asm/asmregs.h similarity index 100% rename from include/asm-parisc/asmregs.h rename to arch/parisc/include/asm/asmregs.h diff --git a/include/asm-parisc/assembly.h b/arch/parisc/include/asm/assembly.h similarity index 100% rename from include/asm-parisc/assembly.h rename to arch/parisc/include/asm/assembly.h diff --git a/include/asm-parisc/atomic.h b/arch/parisc/include/asm/atomic.h similarity index 100% rename from include/asm-parisc/atomic.h rename to arch/parisc/include/asm/atomic.h diff --git a/include/asm-parisc/auxvec.h b/arch/parisc/include/asm/auxvec.h similarity index 100% rename from include/asm-parisc/auxvec.h rename to arch/parisc/include/asm/auxvec.h diff --git a/include/asm-parisc/bitops.h b/arch/parisc/include/asm/bitops.h similarity index 100% rename from include/asm-parisc/bitops.h rename to arch/parisc/include/asm/bitops.h diff --git a/include/asm-parisc/bug.h b/arch/parisc/include/asm/bug.h similarity index 100% rename from include/asm-parisc/bug.h rename to arch/parisc/include/asm/bug.h diff --git a/include/asm-parisc/bugs.h b/arch/parisc/include/asm/bugs.h similarity index 100% rename from include/asm-parisc/bugs.h rename to arch/parisc/include/asm/bugs.h diff --git a/include/asm-parisc/byteorder.h b/arch/parisc/include/asm/byteorder.h similarity index 100% rename from include/asm-parisc/byteorder.h rename to arch/parisc/include/asm/byteorder.h diff --git a/include/asm-parisc/cache.h b/arch/parisc/include/asm/cache.h similarity index 100% rename from include/asm-parisc/cache.h rename to arch/parisc/include/asm/cache.h diff --git a/include/asm-parisc/cacheflush.h b/arch/parisc/include/asm/cacheflush.h similarity index 100% rename from include/asm-parisc/cacheflush.h rename to arch/parisc/include/asm/cacheflush.h diff --git a/include/asm-parisc/checksum.h b/arch/parisc/include/asm/checksum.h similarity index 100% rename from include/asm-parisc/checksum.h rename to arch/parisc/include/asm/checksum.h diff --git a/include/asm-parisc/compat.h b/arch/parisc/include/asm/compat.h similarity index 100% rename from include/asm-parisc/compat.h rename to arch/parisc/include/asm/compat.h diff --git a/include/asm-parisc/compat_rt_sigframe.h b/arch/parisc/include/asm/compat_rt_sigframe.h similarity index 100% rename from include/asm-parisc/compat_rt_sigframe.h rename to arch/parisc/include/asm/compat_rt_sigframe.h diff --git a/include/asm-parisc/compat_signal.h b/arch/parisc/include/asm/compat_signal.h similarity index 100% rename from include/asm-parisc/compat_signal.h rename to 
arch/parisc/include/asm/compat_signal.h diff --git a/include/asm-parisc/compat_ucontext.h b/arch/parisc/include/asm/compat_ucontext.h similarity index 100% rename from include/asm-parisc/compat_ucontext.h rename to arch/parisc/include/asm/compat_ucontext.h diff --git a/include/asm-parisc/cputime.h b/arch/parisc/include/asm/cputime.h similarity index 100% rename from include/asm-parisc/cputime.h rename to arch/parisc/include/asm/cputime.h diff --git a/include/asm-parisc/current.h b/arch/parisc/include/asm/current.h similarity index 100% rename from include/asm-parisc/current.h rename to arch/parisc/include/asm/current.h diff --git a/include/asm-parisc/delay.h b/arch/parisc/include/asm/delay.h similarity index 100% rename from include/asm-parisc/delay.h rename to arch/parisc/include/asm/delay.h diff --git a/include/asm-parisc/device.h b/arch/parisc/include/asm/device.h similarity index 100% rename from include/asm-parisc/device.h rename to arch/parisc/include/asm/device.h diff --git a/include/asm-parisc/div64.h b/arch/parisc/include/asm/div64.h similarity index 100% rename from include/asm-parisc/div64.h rename to arch/parisc/include/asm/div64.h diff --git a/include/asm-parisc/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h similarity index 100% rename from include/asm-parisc/dma-mapping.h rename to arch/parisc/include/asm/dma-mapping.h diff --git a/include/asm-parisc/dma.h b/arch/parisc/include/asm/dma.h similarity index 100% rename from include/asm-parisc/dma.h rename to arch/parisc/include/asm/dma.h diff --git a/include/asm-parisc/eisa_bus.h b/arch/parisc/include/asm/eisa_bus.h similarity index 100% rename from include/asm-parisc/eisa_bus.h rename to arch/parisc/include/asm/eisa_bus.h diff --git a/include/asm-parisc/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h similarity index 100% rename from include/asm-parisc/eisa_eeprom.h rename to arch/parisc/include/asm/eisa_eeprom.h diff --git a/include/asm-parisc/elf.h b/arch/parisc/include/asm/elf.h similarity index 100% rename from include/asm-parisc/elf.h rename to arch/parisc/include/asm/elf.h diff --git a/include/asm-parisc/emergency-restart.h b/arch/parisc/include/asm/emergency-restart.h similarity index 100% rename from include/asm-parisc/emergency-restart.h rename to arch/parisc/include/asm/emergency-restart.h diff --git a/include/asm-parisc/errno.h b/arch/parisc/include/asm/errno.h similarity index 100% rename from include/asm-parisc/errno.h rename to arch/parisc/include/asm/errno.h diff --git a/include/asm-parisc/fb.h b/arch/parisc/include/asm/fb.h similarity index 100% rename from include/asm-parisc/fb.h rename to arch/parisc/include/asm/fb.h diff --git a/include/asm-parisc/fcntl.h b/arch/parisc/include/asm/fcntl.h similarity index 100% rename from include/asm-parisc/fcntl.h rename to arch/parisc/include/asm/fcntl.h diff --git a/include/asm-parisc/fixmap.h b/arch/parisc/include/asm/fixmap.h similarity index 100% rename from include/asm-parisc/fixmap.h rename to arch/parisc/include/asm/fixmap.h diff --git a/include/asm-parisc/floppy.h b/arch/parisc/include/asm/floppy.h similarity index 100% rename from include/asm-parisc/floppy.h rename to arch/parisc/include/asm/floppy.h diff --git a/include/asm-parisc/futex.h b/arch/parisc/include/asm/futex.h similarity index 100% rename from include/asm-parisc/futex.h rename to arch/parisc/include/asm/futex.h diff --git a/include/asm-parisc/grfioctl.h b/arch/parisc/include/asm/grfioctl.h similarity index 100% rename from include/asm-parisc/grfioctl.h rename to 
arch/parisc/include/asm/grfioctl.h diff --git a/include/asm-parisc/hardirq.h b/arch/parisc/include/asm/hardirq.h similarity index 100% rename from include/asm-parisc/hardirq.h rename to arch/parisc/include/asm/hardirq.h diff --git a/include/asm-parisc/hardware.h b/arch/parisc/include/asm/hardware.h similarity index 100% rename from include/asm-parisc/hardware.h rename to arch/parisc/include/asm/hardware.h diff --git a/include/asm-parisc/hw_irq.h b/arch/parisc/include/asm/hw_irq.h similarity index 100% rename from include/asm-parisc/hw_irq.h rename to arch/parisc/include/asm/hw_irq.h diff --git a/include/asm-parisc/ide.h b/arch/parisc/include/asm/ide.h similarity index 77% rename from include/asm-parisc/ide.h rename to arch/parisc/include/asm/ide.h index c246ef75017db7fc048699577c0603121bb19f51..81700a2321cff3926bbf60320a2bed1cba248118 100644 --- a/include/asm-parisc/ide.h +++ b/arch/parisc/include/asm/ide.h @@ -13,10 +13,6 @@ #ifdef __KERNEL__ -#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id)) -#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id)) -#define ide_request_region(from,extent,name) request_region((from), (extent), (name)) -#define ide_release_region(from,extent) release_region((from), (extent)) /* Generic I/O and MEMIO string operations. */ #define __ide_insw insw diff --git a/include/asm-parisc/io.h b/arch/parisc/include/asm/io.h similarity index 100% rename from include/asm-parisc/io.h rename to arch/parisc/include/asm/io.h diff --git a/include/asm-parisc/ioctl.h b/arch/parisc/include/asm/ioctl.h similarity index 100% rename from include/asm-parisc/ioctl.h rename to arch/parisc/include/asm/ioctl.h diff --git a/include/asm-parisc/ioctls.h b/arch/parisc/include/asm/ioctls.h similarity index 100% rename from include/asm-parisc/ioctls.h rename to arch/parisc/include/asm/ioctls.h diff --git a/include/asm-parisc/ipcbuf.h b/arch/parisc/include/asm/ipcbuf.h similarity index 100% rename from include/asm-parisc/ipcbuf.h rename to arch/parisc/include/asm/ipcbuf.h diff --git a/include/asm-parisc/irq.h b/arch/parisc/include/asm/irq.h similarity index 100% rename from include/asm-parisc/irq.h rename to arch/parisc/include/asm/irq.h diff --git a/include/asm-parisc/irq_regs.h b/arch/parisc/include/asm/irq_regs.h similarity index 100% rename from include/asm-parisc/irq_regs.h rename to arch/parisc/include/asm/irq_regs.h diff --git a/include/asm-parisc/kdebug.h b/arch/parisc/include/asm/kdebug.h similarity index 100% rename from include/asm-parisc/kdebug.h rename to arch/parisc/include/asm/kdebug.h diff --git a/include/asm-parisc/kmap_types.h b/arch/parisc/include/asm/kmap_types.h similarity index 100% rename from include/asm-parisc/kmap_types.h rename to arch/parisc/include/asm/kmap_types.h diff --git a/include/asm-parisc/led.h b/arch/parisc/include/asm/led.h similarity index 100% rename from include/asm-parisc/led.h rename to arch/parisc/include/asm/led.h diff --git a/include/asm-parisc/linkage.h b/arch/parisc/include/asm/linkage.h similarity index 100% rename from include/asm-parisc/linkage.h rename to arch/parisc/include/asm/linkage.h diff --git a/include/asm-parisc/local.h b/arch/parisc/include/asm/local.h similarity index 100% rename from include/asm-parisc/local.h rename to arch/parisc/include/asm/local.h diff --git a/include/asm-parisc/machdep.h b/arch/parisc/include/asm/machdep.h similarity index 100% rename from include/asm-parisc/machdep.h rename to arch/parisc/include/asm/machdep.h diff --git a/include/asm-parisc/mc146818rtc.h 
b/arch/parisc/include/asm/mc146818rtc.h similarity index 100% rename from include/asm-parisc/mc146818rtc.h rename to arch/parisc/include/asm/mc146818rtc.h diff --git a/include/asm-parisc/mckinley.h b/arch/parisc/include/asm/mckinley.h similarity index 100% rename from include/asm-parisc/mckinley.h rename to arch/parisc/include/asm/mckinley.h diff --git a/include/asm-parisc/mman.h b/arch/parisc/include/asm/mman.h similarity index 100% rename from include/asm-parisc/mman.h rename to arch/parisc/include/asm/mman.h diff --git a/include/asm-parisc/mmu.h b/arch/parisc/include/asm/mmu.h similarity index 100% rename from include/asm-parisc/mmu.h rename to arch/parisc/include/asm/mmu.h diff --git a/include/asm-parisc/mmu_context.h b/arch/parisc/include/asm/mmu_context.h similarity index 100% rename from include/asm-parisc/mmu_context.h rename to arch/parisc/include/asm/mmu_context.h diff --git a/include/asm-parisc/mmzone.h b/arch/parisc/include/asm/mmzone.h similarity index 100% rename from include/asm-parisc/mmzone.h rename to arch/parisc/include/asm/mmzone.h diff --git a/include/asm-parisc/module.h b/arch/parisc/include/asm/module.h similarity index 100% rename from include/asm-parisc/module.h rename to arch/parisc/include/asm/module.h diff --git a/include/asm-parisc/msgbuf.h b/arch/parisc/include/asm/msgbuf.h similarity index 100% rename from include/asm-parisc/msgbuf.h rename to arch/parisc/include/asm/msgbuf.h diff --git a/include/asm-parisc/mutex.h b/arch/parisc/include/asm/mutex.h similarity index 100% rename from include/asm-parisc/mutex.h rename to arch/parisc/include/asm/mutex.h diff --git a/include/asm-parisc/page.h b/arch/parisc/include/asm/page.h similarity index 100% rename from include/asm-parisc/page.h rename to arch/parisc/include/asm/page.h diff --git a/include/asm-parisc/param.h b/arch/parisc/include/asm/param.h similarity index 100% rename from include/asm-parisc/param.h rename to arch/parisc/include/asm/param.h diff --git a/include/asm-parisc/parisc-device.h b/arch/parisc/include/asm/parisc-device.h similarity index 100% rename from include/asm-parisc/parisc-device.h rename to arch/parisc/include/asm/parisc-device.h diff --git a/include/asm-parisc/parport.h b/arch/parisc/include/asm/parport.h similarity index 100% rename from include/asm-parisc/parport.h rename to arch/parisc/include/asm/parport.h diff --git a/include/asm-parisc/pci.h b/arch/parisc/include/asm/pci.h similarity index 100% rename from include/asm-parisc/pci.h rename to arch/parisc/include/asm/pci.h diff --git a/include/asm-parisc/pdc.h b/arch/parisc/include/asm/pdc.h similarity index 99% rename from include/asm-parisc/pdc.h rename to arch/parisc/include/asm/pdc.h index 9eaa794c3e4a3f1de70716e450896e1cddf8d570..c584b00c6074af9419ac31badf1e432f38262815 100644 --- a/include/asm-parisc/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -332,6 +332,9 @@ #define BOOT_CONSOLE_SPA_OFFSET 0x3c4 #define BOOT_CONSOLE_PATH_OFFSET 0x3a8 +/* size of the pdc_result buffer for firmware.c */ +#define NUM_PDC_RESULT 32 + #if !defined(__ASSEMBLY__) #ifdef __KERNEL__ @@ -600,6 +603,7 @@ int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsi int pdc_chassis_disp(unsigned long disp); int pdc_chassis_warn(unsigned long *warn); int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info); +int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info); int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index, void *iodc_data, unsigned int iodc_data_size); int pdc_system_map_find_mods(struct 
pdc_system_map_mod_info *pdc_mod_info, @@ -638,6 +642,7 @@ int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr, #endif void set_firmware_width(void); +void set_firmware_width_unlocked(void); int pdc_do_firm_test_reset(unsigned long ftc_bitmap); int pdc_do_reset(void); int pdc_soft_power_info(unsigned long *power_reg); diff --git a/include/asm-parisc/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h similarity index 100% rename from include/asm-parisc/pdc_chassis.h rename to arch/parisc/include/asm/pdc_chassis.h diff --git a/include/asm-parisc/pdcpat.h b/arch/parisc/include/asm/pdcpat.h similarity index 100% rename from include/asm-parisc/pdcpat.h rename to arch/parisc/include/asm/pdcpat.h diff --git a/include/asm-parisc/percpu.h b/arch/parisc/include/asm/percpu.h similarity index 100% rename from include/asm-parisc/percpu.h rename to arch/parisc/include/asm/percpu.h diff --git a/include/asm-parisc/perf.h b/arch/parisc/include/asm/perf.h similarity index 100% rename from include/asm-parisc/perf.h rename to arch/parisc/include/asm/perf.h diff --git a/include/asm-parisc/pgalloc.h b/arch/parisc/include/asm/pgalloc.h similarity index 100% rename from include/asm-parisc/pgalloc.h rename to arch/parisc/include/asm/pgalloc.h diff --git a/include/asm-parisc/pgtable.h b/arch/parisc/include/asm/pgtable.h similarity index 100% rename from include/asm-parisc/pgtable.h rename to arch/parisc/include/asm/pgtable.h diff --git a/include/asm-parisc/poll.h b/arch/parisc/include/asm/poll.h similarity index 100% rename from include/asm-parisc/poll.h rename to arch/parisc/include/asm/poll.h diff --git a/include/asm-parisc/posix_types.h b/arch/parisc/include/asm/posix_types.h similarity index 100% rename from include/asm-parisc/posix_types.h rename to arch/parisc/include/asm/posix_types.h diff --git a/include/asm-parisc/prefetch.h b/arch/parisc/include/asm/prefetch.h similarity index 100% rename from include/asm-parisc/prefetch.h rename to arch/parisc/include/asm/prefetch.h diff --git a/include/asm-parisc/processor.h b/arch/parisc/include/asm/processor.h similarity index 100% rename from include/asm-parisc/processor.h rename to arch/parisc/include/asm/processor.h diff --git a/include/asm-parisc/psw.h b/arch/parisc/include/asm/psw.h similarity index 100% rename from include/asm-parisc/psw.h rename to arch/parisc/include/asm/psw.h diff --git a/include/asm-parisc/ptrace.h b/arch/parisc/include/asm/ptrace.h similarity index 85% rename from include/asm-parisc/ptrace.h rename to arch/parisc/include/asm/ptrace.h index 3e94c5d85ff5715a3d95af8e6444b27ae081a1fd..afa5333187b4519126d2556b008e9fdfaba0fde5 100644 --- a/include/asm-parisc/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -47,6 +47,16 @@ struct pt_regs { #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS)) +#define __ARCH_WANT_COMPAT_SYS_PTRACE + +struct task_struct; +#define arch_has_single_step() 1 +void user_disable_single_step(struct task_struct *task); +void user_enable_single_step(struct task_struct *task); + +#define arch_has_block_step() 1 +void user_enable_block_step(struct task_struct *task); + /* XXX should we use iaoq[1] or iaoq[0] ? */ #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) #define user_space(regs) (((regs)->iasq[1] != 0) ? 
1 : 0) diff --git a/include/asm-parisc/real.h b/arch/parisc/include/asm/real.h similarity index 100% rename from include/asm-parisc/real.h rename to arch/parisc/include/asm/real.h diff --git a/include/asm-parisc/resource.h b/arch/parisc/include/asm/resource.h similarity index 100% rename from include/asm-parisc/resource.h rename to arch/parisc/include/asm/resource.h diff --git a/include/asm-parisc/ropes.h b/arch/parisc/include/asm/ropes.h similarity index 99% rename from include/asm-parisc/ropes.h rename to arch/parisc/include/asm/ropes.h index 007a880615eb80d766863f8e0b3708750c2ebbef..09f51d5ab57c254d78693b633d9077fd26454856 100644 --- a/include/asm-parisc/ropes.h +++ b/arch/parisc/include/asm/ropes.h @@ -1,7 +1,7 @@ #ifndef _ASM_PARISC_ROPES_H_ #define _ASM_PARISC_ROPES_H_ -#include +#include #ifdef CONFIG_64BIT /* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */ diff --git a/include/asm-parisc/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h similarity index 100% rename from include/asm-parisc/rt_sigframe.h rename to arch/parisc/include/asm/rt_sigframe.h diff --git a/include/asm-parisc/rtc.h b/arch/parisc/include/asm/rtc.h similarity index 100% rename from include/asm-parisc/rtc.h rename to arch/parisc/include/asm/rtc.h diff --git a/include/asm-parisc/runway.h b/arch/parisc/include/asm/runway.h similarity index 100% rename from include/asm-parisc/runway.h rename to arch/parisc/include/asm/runway.h diff --git a/include/asm-parisc/scatterlist.h b/arch/parisc/include/asm/scatterlist.h similarity index 100% rename from include/asm-parisc/scatterlist.h rename to arch/parisc/include/asm/scatterlist.h diff --git a/include/asm-parisc/sections.h b/arch/parisc/include/asm/sections.h similarity index 100% rename from include/asm-parisc/sections.h rename to arch/parisc/include/asm/sections.h diff --git a/include/asm-parisc/segment.h b/arch/parisc/include/asm/segment.h similarity index 100% rename from include/asm-parisc/segment.h rename to arch/parisc/include/asm/segment.h diff --git a/include/asm-parisc/sembuf.h b/arch/parisc/include/asm/sembuf.h similarity index 100% rename from include/asm-parisc/sembuf.h rename to arch/parisc/include/asm/sembuf.h diff --git a/include/asm-parisc/serial.h b/arch/parisc/include/asm/serial.h similarity index 100% rename from include/asm-parisc/serial.h rename to arch/parisc/include/asm/serial.h diff --git a/include/asm-parisc/setup.h b/arch/parisc/include/asm/setup.h similarity index 100% rename from include/asm-parisc/setup.h rename to arch/parisc/include/asm/setup.h diff --git a/include/asm-parisc/shmbuf.h b/arch/parisc/include/asm/shmbuf.h similarity index 100% rename from include/asm-parisc/shmbuf.h rename to arch/parisc/include/asm/shmbuf.h diff --git a/include/asm-parisc/shmparam.h b/arch/parisc/include/asm/shmparam.h similarity index 100% rename from include/asm-parisc/shmparam.h rename to arch/parisc/include/asm/shmparam.h diff --git a/include/asm-parisc/sigcontext.h b/arch/parisc/include/asm/sigcontext.h similarity index 100% rename from include/asm-parisc/sigcontext.h rename to arch/parisc/include/asm/sigcontext.h diff --git a/include/asm-parisc/siginfo.h b/arch/parisc/include/asm/siginfo.h similarity index 100% rename from include/asm-parisc/siginfo.h rename to arch/parisc/include/asm/siginfo.h diff --git a/include/asm-parisc/signal.h b/arch/parisc/include/asm/signal.h similarity index 100% rename from include/asm-parisc/signal.h rename to arch/parisc/include/asm/signal.h diff --git a/include/asm-parisc/smp.h 
b/arch/parisc/include/asm/smp.h similarity index 100% rename from include/asm-parisc/smp.h rename to arch/parisc/include/asm/smp.h diff --git a/include/asm-parisc/socket.h b/arch/parisc/include/asm/socket.h similarity index 100% rename from include/asm-parisc/socket.h rename to arch/parisc/include/asm/socket.h diff --git a/include/asm-parisc/sockios.h b/arch/parisc/include/asm/sockios.h similarity index 100% rename from include/asm-parisc/sockios.h rename to arch/parisc/include/asm/sockios.h diff --git a/include/asm-parisc/spinlock.h b/arch/parisc/include/asm/spinlock.h similarity index 100% rename from include/asm-parisc/spinlock.h rename to arch/parisc/include/asm/spinlock.h diff --git a/include/asm-parisc/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h similarity index 100% rename from include/asm-parisc/spinlock_types.h rename to arch/parisc/include/asm/spinlock_types.h diff --git a/include/asm-parisc/stat.h b/arch/parisc/include/asm/stat.h similarity index 100% rename from include/asm-parisc/stat.h rename to arch/parisc/include/asm/stat.h diff --git a/include/asm-parisc/statfs.h b/arch/parisc/include/asm/statfs.h similarity index 100% rename from include/asm-parisc/statfs.h rename to arch/parisc/include/asm/statfs.h diff --git a/include/asm-parisc/string.h b/arch/parisc/include/asm/string.h similarity index 100% rename from include/asm-parisc/string.h rename to arch/parisc/include/asm/string.h diff --git a/include/asm-parisc/superio.h b/arch/parisc/include/asm/superio.h similarity index 100% rename from include/asm-parisc/superio.h rename to arch/parisc/include/asm/superio.h diff --git a/include/asm-parisc/system.h b/arch/parisc/include/asm/system.h similarity index 100% rename from include/asm-parisc/system.h rename to arch/parisc/include/asm/system.h diff --git a/include/asm-parisc/termbits.h b/arch/parisc/include/asm/termbits.h similarity index 100% rename from include/asm-parisc/termbits.h rename to arch/parisc/include/asm/termbits.h diff --git a/include/asm-parisc/termios.h b/arch/parisc/include/asm/termios.h similarity index 100% rename from include/asm-parisc/termios.h rename to arch/parisc/include/asm/termios.h diff --git a/include/asm-parisc/thread_info.h b/arch/parisc/include/asm/thread_info.h similarity index 96% rename from include/asm-parisc/thread_info.h rename to arch/parisc/include/asm/thread_info.h index 9f812741c3553d41a45e8dada845c7bb57fef86a..0407959da489d54a72b4ccab371cc21e996e79a3 100644 --- a/include/asm-parisc/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -58,6 +58,7 @@ struct thread_info { #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_MEMDIE 5 #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ +#define TIF_FREEZE 7 /* is freezing for suspend */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -65,6 +66,7 @@ struct thread_info { #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) diff --git a/include/asm-parisc/timex.h b/arch/parisc/include/asm/timex.h similarity index 100% rename from include/asm-parisc/timex.h rename to arch/parisc/include/asm/timex.h diff --git a/include/asm-parisc/tlb.h b/arch/parisc/include/asm/tlb.h similarity index 100% rename from include/asm-parisc/tlb.h rename to arch/parisc/include/asm/tlb.h diff --git 
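
The TIF_FREEZE/_TIF_FREEZE definitions added for m68knommu and parisc above give the suspend freezer a per-task flag to set and test. A rough sketch of how the freezer side consumes the flag, modelled on the <linux/freezer.h> helpers of this period (simplified, for illustration only):

  /* Rough sketch of the freezer helpers of this period (simplified
   * from <linux/freezer.h>, not the exact source).
   */
  static inline int freezing(struct task_struct *p)
  {
          return test_tsk_thread_flag(p, TIF_FREEZE);
  }

  static inline void set_freeze_flag(struct task_struct *p)
  {
          set_tsk_thread_flag(p, TIF_FREEZE);
  }

  /* A task that sees the flag on its way back to user space calls
   * try_to_freeze(), which clears TIF_FREEZE and parks the task in
   * the refrigerator until thaw_processes() runs.
   */
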
a/include/asm-parisc/tlbflush.h b/arch/parisc/include/asm/tlbflush.h similarity index 100% rename from include/asm-parisc/tlbflush.h rename to arch/parisc/include/asm/tlbflush.h diff --git a/include/asm-parisc/topology.h b/arch/parisc/include/asm/topology.h similarity index 100% rename from include/asm-parisc/topology.h rename to arch/parisc/include/asm/topology.h diff --git a/include/asm-parisc/traps.h b/arch/parisc/include/asm/traps.h similarity index 100% rename from include/asm-parisc/traps.h rename to arch/parisc/include/asm/traps.h diff --git a/include/asm-parisc/types.h b/arch/parisc/include/asm/types.h similarity index 100% rename from include/asm-parisc/types.h rename to arch/parisc/include/asm/types.h diff --git a/include/asm-parisc/uaccess.h b/arch/parisc/include/asm/uaccess.h similarity index 100% rename from include/asm-parisc/uaccess.h rename to arch/parisc/include/asm/uaccess.h diff --git a/include/asm-parisc/ucontext.h b/arch/parisc/include/asm/ucontext.h similarity index 100% rename from include/asm-parisc/ucontext.h rename to arch/parisc/include/asm/ucontext.h diff --git a/include/asm-parisc/unaligned.h b/arch/parisc/include/asm/unaligned.h similarity index 100% rename from include/asm-parisc/unaligned.h rename to arch/parisc/include/asm/unaligned.h diff --git a/include/asm-parisc/unistd.h b/arch/parisc/include/asm/unistd.h similarity index 99% rename from include/asm-parisc/unistd.h rename to arch/parisc/include/asm/unistd.h index a7d857f0e4f404963c86f2baeb240f80d7a6f110..ef26b009dc5da1f323a7fc77d329e6c520b5e40f 100644 --- a/include/asm-parisc/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -801,8 +801,14 @@ #define __NR_timerfd_create (__NR_Linux + 306) #define __NR_timerfd_settime (__NR_Linux + 307) #define __NR_timerfd_gettime (__NR_Linux + 308) - -#define __NR_Linux_syscalls (__NR_timerfd_gettime + 1) +#define __NR_signalfd4 (__NR_Linux + 309) +#define __NR_eventfd2 (__NR_Linux + 310) +#define __NR_epoll_create1 (__NR_Linux + 311) +#define __NR_dup3 (__NR_Linux + 312) +#define __NR_pipe2 (__NR_Linux + 313) +#define __NR_inotify_init1 (__NR_Linux + 314) + +#define __NR_Linux_syscalls (__NR_inotify_init1 + 1) #define __IGNORE_select /* newselect */ diff --git a/include/asm-parisc/unwind.h b/arch/parisc/include/asm/unwind.h similarity index 99% rename from include/asm-parisc/unwind.h rename to arch/parisc/include/asm/unwind.h index 2f7e6e50a1580de34d23216018cc015d1bf7780f..52482e4fc20d1f11407243915968523160839fbb 100644 --- a/include/asm-parisc/unwind.h +++ b/arch/parisc/include/asm/unwind.h @@ -74,4 +74,6 @@ void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *r int unwind_once(struct unwind_frame_info *info); int unwind_to_user(struct unwind_frame_info *info); +int unwind_init(void); + #endif diff --git a/include/asm-parisc/user.h b/arch/parisc/include/asm/user.h similarity index 100% rename from include/asm-parisc/user.h rename to arch/parisc/include/asm/user.h diff --git a/include/asm-parisc/vga.h b/arch/parisc/include/asm/vga.h similarity index 100% rename from include/asm-parisc/vga.h rename to arch/parisc/include/asm/vga.h diff --git a/include/asm-parisc/xor.h b/arch/parisc/include/asm/xor.h similarity index 100% rename from include/asm-parisc/xor.h rename to arch/parisc/include/asm/xor.h diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c5f676c3c224b67afb51c50589e82fbf6d333c46 --- /dev/null +++ b/arch/parisc/kernel/.gitignore @@ -0,0 +1 @@ 
+vmlinux.lds diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 3efc0b73e4ff9ce71efd7bf93550af12f1710394..699cf8ef211816576c5ee10d29c7bc67257580fc 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -290,5 +290,8 @@ int main(void) DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); + BLANK(); + DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long)); + BLANK(); return 0; } diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 7177a6cd1b7f58b0fcc83f60ea5c11cf1bd96e1a..03f26bd75bd8ebcb301beb7a7e90c07454d08412 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -71,8 +71,8 @@ #include /* for boot_cpu_data */ static DEFINE_SPINLOCK(pdc_lock); -static unsigned long pdc_result[32] __attribute__ ((aligned (8))); -static unsigned long pdc_result2[32] __attribute__ ((aligned (8))); +extern unsigned long pdc_result[NUM_PDC_RESULT]; +extern unsigned long pdc_result2[NUM_PDC_RESULT]; #ifdef CONFIG_64BIT #define WIDE_FIRMWARE 0x1 @@ -150,26 +150,40 @@ static void convert_to_wide(unsigned long *addr) #endif } +#ifdef CONFIG_64BIT +void __init set_firmware_width_unlocked(void) +{ + int ret; + + ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, + __pa(pdc_result), 0); + convert_to_wide(pdc_result); + if (pdc_result[0] != NARROW_FIRMWARE) + parisc_narrow_firmware = 0; +} + /** * set_firmware_width - Determine if the firmware is wide or narrow. * - * This function must be called before any pdc_* function that uses the convert_to_wide - * function. + * This function must be called before any pdc_* function that uses the + * convert_to_wide function. */ void __init set_firmware_width(void) { -#ifdef CONFIG_64BIT - int retval; unsigned long flags; + spin_lock_irqsave(&pdc_lock, flags); + set_firmware_width_unlocked(); + spin_unlock_irqrestore(&pdc_lock, flags); +} +#else +void __init set_firmware_width_unlocked(void) { + return; +} - spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0); - convert_to_wide(pdc_result); - if(pdc_result[0] != NARROW_FIRMWARE) - parisc_narrow_firmware = 0; - spin_unlock_irqrestore(&pdc_lock, flags); -#endif +void __init set_firmware_width(void) { + return; } +#endif /*CONFIG_64BIT*/ /** * pdc_emergency_unlock - Unlock the linux pdc lock @@ -288,6 +302,20 @@ int pdc_chassis_warn(unsigned long *warn) return retval; } +int __init pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) +{ + int ret; + + ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result)); + convert_to_wide(pdc_result); + pdc_coproc_info->ccr_functional = pdc_result[0]; + pdc_coproc_info->ccr_present = pdc_result[1]; + pdc_coproc_info->revision = pdc_result[17]; + pdc_coproc_info->model = pdc_result[18]; + + return ret; +} + /** * pdc_coproc_cfg - To identify coprocessors attached to the processor. * @pdc_coproc_info: Return buffer address. 
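
The ASM_PDC_RESULT_SIZE constant added to asm-offsets.c is what lets real2.S (further down in this patch) size the pdc_result/pdc_result2 buffers as NUM_PDC_RESULT * sizeof(unsigned long) without hard-coding the number in assembly. Roughly, the asm-offsets DEFINE() macro works as sketched below (simplified; see include/linux/kbuild.h for the real definition):

  /* Sketch of the asm-offsets mechanism: DEFINE() emits a marker line
   * into the compiler's assembly output, and the kbuild sed rule turns
   * each marker into a #define in the generated asm-offsets.h, which
   * assembly files can include.
   */
  #define DEFINE(sym, val) \
          asm volatile("\n->" #sym " %0 " #val : : "i" (val))

  /* After the build step, real2.S effectively sees a numeric
   * ASM_PDC_RESULT_SIZE equal to NUM_PDC_RESULT * sizeof(unsigned long)
   * (256 bytes on 64-bit) and reserves that much with ".block".
   */
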
@@ -297,19 +325,14 @@ int pdc_chassis_warn(unsigned long *warn) */ int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) { - int retval; + int ret; unsigned long flags; - spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result)); - convert_to_wide(pdc_result); - pdc_coproc_info->ccr_functional = pdc_result[0]; - pdc_coproc_info->ccr_present = pdc_result[1]; - pdc_coproc_info->revision = pdc_result[17]; - pdc_coproc_info->model = pdc_result[18]; - spin_unlock_irqrestore(&pdc_lock, flags); + spin_lock_irqsave(&pdc_lock, flags); + ret = pdc_coproc_cfg_unlocked(pdc_coproc_info); + spin_unlock_irqrestore(&pdc_lock, flags); - return retval; + return ret; } /** diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index a84e31e828768943a7ac288ccb26aa49499b275c..0e3d9f9b9e33e97680190e3752cea063fce9c251 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -121,7 +121,7 @@ $pgt_fill_loop: copy %r0,%r2 /* And the RFI Target address too */ - load32 start_kernel,%r11 + load32 start_parisc,%r11 /* And the initial task pointer */ load32 init_thread_union,%r6 diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 49c637970789ba81da5741a75817ef1274e3b1ac..90904f9dfc504fb1e0337abb196b1617006ce514 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -4,6 +4,7 @@ * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc. * Copyright (C) 2000 Matthew Wilcox * Copyright (C) 2000 David Huggins-Daines + * Copyright (C) 2008 Helge Deller */ #include @@ -27,15 +28,149 @@ /* PSW bits we allow the debugger to modify */ #define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) -#undef DEBUG_PTRACE +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. + */ +void ptrace_disable(struct task_struct *task) +{ + task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); -#ifdef DEBUG_PTRACE -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif + /* make sure the trap bits are not set */ + pa_psw(task)->r = 0; + pa_psw(task)->t = 0; + pa_psw(task)->h = 0; + pa_psw(task)->l = 0; +} + +/* + * The following functions are called by ptrace_resume() when + * enabling or disabling single/block tracing. + */ +void user_disable_single_step(struct task_struct *task) +{ + ptrace_disable(task); +} + +void user_enable_single_step(struct task_struct *task) +{ + task->ptrace &= ~PT_BLOCKSTEP; + task->ptrace |= PT_SINGLESTEP; + + if (pa_psw(task)->n) { + struct siginfo si; + + /* Nullified, just crank over the queue. */ + task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; + task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; + task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4; + pa_psw(task)->n = 0; + pa_psw(task)->x = 0; + pa_psw(task)->y = 0; + pa_psw(task)->z = 0; + pa_psw(task)->b = 0; + ptrace_disable(task); + /* Don't wake up the task, but let the + parent know something happened. */ + si.si_code = TRAP_TRACE; + si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3); + si.si_signo = SIGTRAP; + si.si_errno = 0; + force_sig_info(SIGTRAP, &si, task); + /* notify_parent(task, SIGCHLD); */ + return; + } + + /* Enable recovery counter traps. The recovery counter + * itself will be set to zero on a task switch. If the + * task is suspended on a syscall then the syscall return + * path will overwrite the recovery counter with a suitable + * value such that it traps once back in user space. 
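
The firmware.c rework above follows a single pattern: each PDC helper needed during very early boot is split into an _unlocked worker that performs the mem_pdc_call(), plus a thin wrapper that takes pdc_lock around it, so that start_parisc() can use the workers before normal locking is appropriate. In outline, with pdc_foo, PDC_FOO and struct pdc_foo_info as made-up names for illustration:

  /* Illustrative only: pdc_foo, PDC_FOO and struct pdc_foo_info are
   * made-up names standing in for the real helpers in firmware.c.
   */
  int __init pdc_foo_unlocked(struct pdc_foo_info *info)
  {
          int ret;

          ret = mem_pdc_call(PDC_FOO, 0, __pa(pdc_result));
          convert_to_wide(pdc_result);
          info->value = pdc_result[0];

          return ret;
  }

  int __init pdc_foo(struct pdc_foo_info *info)
  {
          unsigned long flags;
          int ret;

          spin_lock_irqsave(&pdc_lock, flags);
          ret = pdc_foo_unlocked(info);
          spin_unlock_irqrestore(&pdc_lock, flags);

          return ret;
  }
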
We + * disable interrupts in the tasks PSW here also, to avoid + * interrupts while the recovery counter is decrementing. + */ + pa_psw(task)->r = 1; + pa_psw(task)->t = 0; + pa_psw(task)->h = 0; + pa_psw(task)->l = 0; +} + +void user_enable_block_step(struct task_struct *task) +{ + task->ptrace &= ~PT_SINGLESTEP; + task->ptrace |= PT_BLOCKSTEP; + + /* Enable taken branch trap. */ + pa_psw(task)->r = 0; + pa_psw(task)->t = 1; + pa_psw(task)->h = 0; + pa_psw(task)->l = 0; +} + +long arch_ptrace(struct task_struct *child, long request, long addr, long data) +{ + unsigned long tmp; + long ret = -EIO; -#ifdef CONFIG_64BIT + switch (request) { + + /* Read the word at location addr in the USER area. For ptraced + processes, the kernel saves all regs on a syscall. */ + case PTRACE_PEEKUSR: + if ((addr & (sizeof(long)-1)) || + (unsigned long) addr >= sizeof(struct pt_regs)) + break; + tmp = *(unsigned long *) ((char *) task_regs(child) + addr); + ret = put_user(tmp, (unsigned long *) data); + break; + + /* Write the word at location addr in the USER area. This will need + to change when the kernel no longer saves all regs on a syscall. + FIXME. There is a problem at the moment in that r3-r18 are only + saved if the process is ptraced on syscall entry, and even then + those values are overwritten by actual register values on syscall + exit. */ + case PTRACE_POKEUSR: + /* Some register values written here may be ignored in + * entry.S:syscall_restore_rfi; e.g. iaoq is written with + * r31/r31+4, and not with the values in pt_regs. + */ + if (addr == PT_PSW) { + /* Allow writing to Nullify, Divide-step-correction, + * and carry/borrow bits. + * BEWARE, if you set N, and then single step, it won't + * stop on the nullified instruction. + */ + data &= USER_PSW_BITS; + task_regs(child)->gr[0] &= ~USER_PSW_BITS; + task_regs(child)->gr[0] |= data; + ret = 0; + break; + } + + if ((addr & (sizeof(long)-1)) || + (unsigned long) addr >= sizeof(struct pt_regs)) + break; + if ((addr >= PT_GR1 && addr <= PT_GR31) || + addr == PT_IAOQ0 || addr == PT_IAOQ1 || + (addr >= PT_FR0 && addr <= PT_FR31 + 4) || + addr == PT_SAR) { + *(unsigned long *) ((char *) task_regs(child) + addr) = data; + ret = 0; + } + break; + + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + + +#ifdef CONFIG_COMPAT /* This function is needed to translate 32 bit pt_regs offsets in to * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel @@ -61,106 +196,25 @@ static long translate_usr_offset(long offset) else return -1; } -#endif -/* - * Called by kernel/ptrace.c when detaching.. - * - * Make sure single step bits etc are not set. - */ -void ptrace_disable(struct task_struct *child) +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data) { - /* make sure the trap bits are not set */ - pa_psw(child)->r = 0; - pa_psw(child)->t = 0; - pa_psw(child)->h = 0; - pa_psw(child)->l = 0; -} - -long arch_ptrace(struct task_struct *child, long request, long addr, long data) -{ - long ret; -#ifdef DEBUG_PTRACE - long oaddr=addr, odata=data; -#endif + compat_uint_t tmp; + long ret = -EIO; switch (request) { - case PTRACE_PEEKTEXT: /* read word at location addr. 
*/ - case PTRACE_PEEKDATA: { -#ifdef CONFIG_64BIT - if (__is_compat_task(child)) { - int copied; - unsigned int tmp; - - addr &= 0xffffffffL; - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - goto out_tsk; - ret = put_user(tmp,(unsigned int *) data); - DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n", - request == PTRACE_PEEKTEXT ? "TEXT" : "DATA", - pid, oaddr, odata, ret, tmp); - } - else -#endif - ret = generic_ptrace_peekdata(child, addr, data); - goto out_tsk; - } - /* when I and D space are separate, this will have to be fixed. */ - case PTRACE_POKETEXT: /* write the word at location addr. */ - case PTRACE_POKEDATA: - ret = 0; -#ifdef CONFIG_64BIT - if (__is_compat_task(child)) { - unsigned int tmp = (unsigned int)data; - DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n", - request == PTRACE_POKETEXT ? "TEXT" : "DATA", - pid, oaddr, odata); - addr &= 0xffffffffL; - if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp)) - goto out_tsk; - } - else -#endif - { - if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) - goto out_tsk; - } - ret = -EIO; - goto out_tsk; - - /* Read the word at location addr in the USER area. For ptraced - processes, the kernel saves all regs on a syscall. */ - case PTRACE_PEEKUSR: { - ret = -EIO; -#ifdef CONFIG_64BIT - if (__is_compat_task(child)) { - unsigned int tmp; - - if (addr & (sizeof(int)-1)) - goto out_tsk; - if ((addr = translate_usr_offset(addr)) < 0) - goto out_tsk; - - tmp = *(unsigned int *) ((char *) task_regs(child) + addr); - ret = put_user(tmp, (unsigned int *) data); - DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n", - pid, oaddr, odata, ret, addr, tmp); - } - else -#endif - { - unsigned long tmp; + case PTRACE_PEEKUSR: + if (addr & (sizeof(compat_uint_t)-1)) + break; + addr = translate_usr_offset(addr); + if (addr < 0) + break; - if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs)) - goto out_tsk; - tmp = *(unsigned long *) ((char *) task_regs(child) + addr); - ret = put_user(tmp, (unsigned long *) data); - } - goto out_tsk; - } + tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr); + ret = put_user(tmp, (compat_uint_t *) (unsigned long) data); + break; /* Write the word at location addr in the USER area. This will need to change when the kernel no longer saves all regs on a syscall. @@ -169,185 +223,46 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) those values are overwritten by actual register values on syscall exit. */ case PTRACE_POKEUSR: - ret = -EIO; /* Some register values written here may be ignored in * entry.S:syscall_restore_rfi; e.g. iaoq is written with * r31/r31+4, and not with the values in pt_regs. */ - /* PT_PSW=0, so this is valid for 32 bit processes under 64 - * bit kernels. - */ if (addr == PT_PSW) { - /* PT_PSW=0, so this is valid for 32 bit processes - * under 64 bit kernels. - * - * Allow writing to Nullify, Divide-step-correction, - * and carry/borrow bits. - * BEWARE, if you set N, and then single step, it won't - * stop on the nullified instruction. + /* Since PT_PSW==0, it is valid for 32 bit processes + * under 64 bit kernels as well. 
*/ - DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n", - pid, oaddr, odata); - data &= USER_PSW_BITS; - task_regs(child)->gr[0] &= ~USER_PSW_BITS; - task_regs(child)->gr[0] |= data; - ret = 0; - goto out_tsk; - } -#ifdef CONFIG_64BIT - if (__is_compat_task(child)) { - if (addr & (sizeof(int)-1)) - goto out_tsk; - if ((addr = translate_usr_offset(addr)) < 0) - goto out_tsk; - DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n", - pid, oaddr, odata, addr); + ret = arch_ptrace(child, request, addr, data); + } else { + if (addr & (sizeof(compat_uint_t)-1)) + break; + addr = translate_usr_offset(addr); + if (addr < 0) + break; if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { /* Special case, fp regs are 64 bits anyway */ - *(unsigned int *) ((char *) task_regs(child) + addr) = data; + *(__u64 *) ((char *) task_regs(child) + addr) = data; ret = 0; } else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 || addr == PT_SAR+4) { /* Zero the top 32 bits */ - *(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0; - *(unsigned int *) ((char *) task_regs(child) + addr) = data; + *(__u32 *) ((char *) task_regs(child) + addr - 4) = 0; + *(__u32 *) ((char *) task_regs(child) + addr) = data; ret = 0; } - goto out_tsk; } - else -#endif - { - if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs)) - goto out_tsk; - if ((addr >= PT_GR1 && addr <= PT_GR31) || - addr == PT_IAOQ0 || addr == PT_IAOQ1 || - (addr >= PT_FR0 && addr <= PT_FR31 + 4) || - addr == PT_SAR) { - *(unsigned long *) ((char *) task_regs(child) + addr) = data; - ret = 0; - } - goto out_tsk; - } - - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: - ret = -EIO; - DBG("sys_ptrace(%s)\n", - request == PTRACE_SYSCALL ? "SYSCALL" : "CONT"); - if (!valid_signal(data)) - goto out_tsk; - child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); - if (request == PTRACE_SYSCALL) { - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - } else { - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - } - child->exit_code = data; - goto out_wake_notrap; - - case PTRACE_KILL: - /* - * make the child exit. Best I can do is send it a - * sigkill. perhaps it should be put in the status - * that it wants to exit. - */ - ret = 0; - DBG("sys_ptrace(KILL)\n"); - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - goto out_tsk; - child->exit_code = SIGKILL; - goto out_wake_notrap; - - case PTRACE_SINGLEBLOCK: - DBG("sys_ptrace(SINGLEBLOCK)\n"); - ret = -EIO; - if (!valid_signal(data)) - goto out_tsk; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->ptrace &= ~PT_SINGLESTEP; - child->ptrace |= PT_BLOCKSTEP; - child->exit_code = data; - - /* Enable taken branch trap. */ - pa_psw(child)->r = 0; - pa_psw(child)->t = 1; - pa_psw(child)->h = 0; - pa_psw(child)->l = 0; - goto out_wake; - - case PTRACE_SINGLESTEP: - DBG("sys_ptrace(SINGLESTEP)\n"); - ret = -EIO; - if (!valid_signal(data)) - goto out_tsk; - - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->ptrace &= ~PT_BLOCKSTEP; - child->ptrace |= PT_SINGLESTEP; - child->exit_code = data; - - if (pa_psw(child)->n) { - struct siginfo si; - - /* Nullified, just crank over the queue. 
*/ - task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1]; - task_regs(child)->iasq[0] = task_regs(child)->iasq[1]; - task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4; - pa_psw(child)->n = 0; - pa_psw(child)->x = 0; - pa_psw(child)->y = 0; - pa_psw(child)->z = 0; - pa_psw(child)->b = 0; - ptrace_disable(child); - /* Don't wake up the child, but let the - parent know something happened. */ - si.si_code = TRAP_TRACE; - si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3); - si.si_signo = SIGTRAP; - si.si_errno = 0; - force_sig_info(SIGTRAP, &si, child); - //notify_parent(child, SIGCHLD); - //ret = 0; - goto out_wake; - } - - /* Enable recovery counter traps. The recovery counter - * itself will be set to zero on a task switch. If the - * task is suspended on a syscall then the syscall return - * path will overwrite the recovery counter with a suitable - * value such that it traps once back in user space. We - * disable interrupts in the childs PSW here also, to avoid - * interrupts while the recovery counter is decrementing. - */ - pa_psw(child)->r = 1; - pa_psw(child)->t = 0; - pa_psw(child)->h = 0; - pa_psw(child)->l = 0; - /* give it a chance to run. */ - goto out_wake; - - case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message, (unsigned int __user *) data); - goto out_tsk; + break; default: - ret = ptrace_request(child, request, addr, data); - goto out_tsk; + ret = compat_ptrace_request(child, request, addr, data); + break; } -out_wake_notrap: - ptrace_disable(child); -out_wake: - wake_up_process(child); - ret = 0; -out_tsk: - DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n", - request, pid, oaddr, odata, ret); return ret; } +#endif + void syscall_trace(void) { diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 7a92695d95a6bf814a72803da7d02403953eacd4..5f3d3a1f9037c7438b980c3e5f8d23589c4a5461 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S @@ -8,12 +8,24 @@ * */ +#include #include #include +#include #include + .section .bss + + .export pdc_result + .export pdc_result2 + .align 8 +pdc_result: + .block ASM_PDC_RESULT_SIZE +pdc_result2: + .block ASM_PDC_RESULT_SIZE + .export real_stack .export real32_stack .export real64_stack diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 39e7c5a5946a8981deee1afa1296efc7cff9eadc..7d27853ff8c8b6233ec4a0e1f2fe3747fbbf88aa 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -44,6 +44,7 @@ #include #include #include +#include static char __initdata command_line[COMMAND_LINE_SIZE]; @@ -123,6 +124,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_64BIT extern int parisc_narrow_firmware; #endif + unwind_init(); init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ @@ -368,6 +370,31 @@ static int __init parisc_init(void) return 0; } - arch_initcall(parisc_init); +void start_parisc(void) +{ + extern void start_kernel(void); + + int ret, cpunum; + struct pdc_coproc_cfg coproc_cfg; + + cpunum = smp_processor_id(); + + set_firmware_width_unlocked(); + + ret = pdc_coproc_cfg_unlocked(&coproc_cfg); + if (ret >= 0 && coproc_cfg.ccr_functional) { + mtctl(coproc_cfg.ccr_functional, 10); + + cpu_data[cpunum].fp_rev = coproc_cfg.revision; + cpu_data[cpunum].fp_model = coproc_cfg.model; + + asm volatile ("fstd %fr0,8(%sp)"); + } else { + panic("must have an fpu to boot linux"); + } + + start_kernel(); + // not reached +} diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 
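
The large deletions above remove the hand-rolled PTRACE_CONT/PTRACE_SINGLESTEP/PTRACE_SINGLEBLOCK/PTRACE_KILL handling: once arch_has_single_step() and arch_has_block_step() are advertised, the generic kernel/ptrace.c resume path drives the new user_enable_*_step()/user_disable_single_step() hooks, and the remaining requests fall through to ptrace_request()/compat_ptrace_request(). A simplified sketch of that generic resume logic (not the exact source; the real code also handles PTRACE_SYSEMU and uses helper macros):

  /* Simplified sketch of the generic resume logic in kernel/ptrace.c. */
  static int ptrace_resume(struct task_struct *child, long request, long data)
  {
          if (!valid_signal(data))
                  return -EIO;

          if (request == PTRACE_SYSCALL)
                  set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
          else
                  clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

          if (request == PTRACE_SINGLEBLOCK) {
                  if (!arch_has_block_step())
                          return -EIO;
                  user_enable_block_step(child);    /* parisc: PSW T bit */
          } else if (request == PTRACE_SINGLESTEP) {
                  if (!arch_has_single_step())
                          return -EIO;
                  user_enable_single_step(child);   /* parisc: recovery counter */
          } else {
                  user_disable_single_step(child);
          }

          child->exit_code = data;
          wake_up_process(child);
          return 0;
  }
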
c7e59f548817fb36554954d663bfedcdf5e3b2f6..303d2b647e418daab682f586565ade19e5420325 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -87,7 +87,7 @@ ENTRY_SAME(setuid) ENTRY_SAME(getuid) ENTRY_COMP(stime) /* 25 */ - ENTRY_SAME(ptrace) + ENTRY_COMP(ptrace) ENTRY_SAME(alarm) /* see stat comment */ ENTRY_COMP(newfstat) @@ -407,6 +407,12 @@ ENTRY_SAME(timerfd_create) ENTRY_COMP(timerfd_settime) ENTRY_COMP(timerfd_gettime) + ENTRY_COMP(signalfd4) + ENTRY_SAME(eventfd2) /* 310 */ + ENTRY_SAME(epoll_create1) + ENTRY_SAME(dup3) + ENTRY_SAME(pipe2) + ENTRY_SAME(inotify_init1) /* Nothing yet */ diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 24be86bba94d6bdf93618109bb490bf6fbe68205..4d09203bc69307275afcb92f5f3a53d2bdff5057 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -215,6 +216,24 @@ void __init start_cpu_itimer(void) cpu_data[cpu].it_value = next_tick; } +struct platform_device rtc_parisc_dev = { + .name = "rtc-parisc", + .id = -1, +}; + +static int __init rtc_init(void) +{ + int ret; + + ret = platform_device_register(&rtc_parisc_dev); + if (ret < 0) + printk(KERN_ERR "unable to register rtc device...\n"); + + /* not necessarily an error */ + return 0; +} +module_init(rtc_init); + void __init time_init(void) { static struct pdc_tod tod_data; @@ -245,4 +264,3 @@ void __init time_init(void) xtime.tv_nsec = 0; } } - diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 701b2d2d88823f55fb2d18a2c03c1e91b2b3f2a9..6773c582e457a15b3e9ddb5006e462754bddd06e 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -170,7 +170,7 @@ void unwind_table_remove(struct unwind_table *table) } /* Called from setup_arch to import the kernel unwind info */ -static int unwind_init(void) +int unwind_init(void) { long start, stop; register unsigned long gp __asm__ ("r27"); @@ -417,5 +417,3 @@ int unwind_to_user(struct unwind_frame_info *info) return ret; } - -module_init(unwind_init); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 380baa1780e9197b060ce1594aa098340d4da538..9391199d9e7731d166b45130fa322e27936b1da0 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -230,6 +230,8 @@ config PPC_OF_PLATFORM_PCI source "init/Kconfig" +source "kernel/Kconfig.freezer" + source "arch/powerpc/sysdev/Kconfig" source "arch/powerpc/platforms/Kconfig" diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 64e144505f653e0ae1d4f190690ef458e9aad5e7..5ac51e6efc1d860ea0d72702d60e63fa42a20c81 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -10,9 +10,13 @@ * 2 of the License, or (at your option) any later version. */ +#ifndef __ASSEMBLY__ +#include +#else +#include +#endif #include #include -#include /* * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index ae2ea803a0f2502daff6e2ffcd24359c1f602675..9047af7baa697afad42e89b19f2cfdd893ad7430 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -74,6 +74,13 @@ struct pci_controller { unsigned long pci_io_size; #endif + /* Some machines have a special region to forward the ISA + * "memory" cycles such as VGA memory regions. 
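
The time.c hunk above registers an "rtc-parisc" platform device; the driver it pairs with is selected by the RTC_DRV_PARISC Kconfig line earlier in this patch and binds by name. A minimal sketch of what such a driver looks like with the rtc_device_register() API of this era (illustrative only; parisc_get_time/parisc_set_time are assumed wrappers around the firmware TOD calls, not the actual drivers/rtc/rtc-parisc.c source):

  static const struct rtc_class_ops parisc_rtc_ops = {
          .read_time = parisc_get_time,   /* assumed wrapper, see lead-in */
          .set_time  = parisc_set_time,   /* assumed wrapper, see lead-in */
  };

  static int __devinit parisc_rtc_probe(struct platform_device *dev)
  {
          struct rtc_device *rtc;

          rtc = rtc_device_register("rtc-parisc", &dev->dev,
                                    &parisc_rtc_ops, THIS_MODULE);
          if (IS_ERR(rtc))
                  return PTR_ERR(rtc);

          platform_set_drvdata(dev, rtc);
          return 0;
  }

  static struct platform_driver parisc_rtc_driver = {
          .driver = {
                  .name = "rtc-parisc",   /* matches rtc_parisc_dev.name */
          },
          .probe = parisc_rtc_probe,
  };
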
Left to 0 + * if unsupported + */ + resource_size_t isa_mem_phys; + resource_size_t isa_mem_size; + struct pci_ops *ops; unsigned int __iomem *cfg_addr; void __iomem *cfg_data; diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 0e52c7828ea498d48c34a37155dba0f57b39a349..39d547fde956a66f9dc3cb5d26914c3e01cb387b 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -123,6 +123,16 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ #define HAVE_PCI_MMAP 1 +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); + +#define HAVE_PCI_LEGACY 1 + #if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE) /* * For 64-bit kernels, pci_unmap_{single,page} is not a nop. @@ -226,5 +236,6 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, extern void pcibios_do_bus_setup(struct pci_bus *bus); extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index fda98715cd356a4b74a18e66c860c81ed62ee50a..5aa22cffdbd658d02c616ba822e3071ac642e3ac 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -678,6 +678,8 @@ struct ps3av_pkt_avb_param { u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE]; }; +/* channel status */ +extern u8 ps3av_mode_cs_info[]; /** command status **/ #define PS3AV_STATUS_SUCCESS 0x0000 /* success */ @@ -735,6 +737,7 @@ extern int ps3av_get_mode(void); extern int ps3av_video_mode2res(u32, u32 *, u32 *); extern int ps3av_video_mute(int); extern int ps3av_audio_mute(int); +extern int ps3av_audio_mute_analog(int); extern int ps3av_dev_open(void); extern int ps3av_dev_close(void); extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data), diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 734e0754fb9bec39a0d8533d77c30ad1c02d1103..280a90cc9894c6ed3f6a2e2611b9d53623218a2f 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -129,7 +129,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, #define CHECK_FULL_REGS(regs) \ do { \ if ((regs)->trap & 1) \ - printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \ + printk(KERN_CRIT "%s: partial register set\n", __func__); \ } while (0) #endif /* __powerpc64__ */ diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index a323c9b32ee12f76604631a532e2c5dc85436aa1..97e056379728c5f936aba586c0f9cf2acf044e34 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -27,6 +27,9 @@ #define DBG(fmt...) #endif +/* Stores the physical address of elf header of crash image. */ +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; + void __init reserve_kdump_trampoline(void) { lmb_reserve(0, KDUMP_RESERVE_LIMIT); @@ -66,7 +69,11 @@ void __init setup_kdump_trampoline(void) DBG(" <- setup_kdump_trampoline()\n"); } -#ifdef CONFIG_PROC_VMCORE +/* + * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by + * is_kdump_kernel() to determine if we are booting after a panic. 
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
 static int __init parse_elfcorehdr(char *p)
 {
 	if (p)
@@ -75,7 +82,6 @@ static int __init parse_elfcorehdr(char *p)
 	return 1;
 }
 __setup("elfcorehdr=", parse_elfcorehdr);
-#endif
 
 static int __init parse_savemaxmem(char *p)
 {
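The new comment above ties elfcorehdr_addr to is_kdump_kernel(). As a rough sketch only, not part of this patch and assuming the usual is_kdump_kernel() helper from <linux/crash_dump.h>, a driver could branch on it like this:

	#include <linux/crash_dump.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		/* In a kdump kernel, elfcorehdr_addr was set from the
		 * elfcorehdr= option parsed above; skip heavy setup and
		 * leave the crashed kernel's device state alone.
		 */
		if (is_kdump_kernel())
			return 0;

		printk(KERN_INFO "example: normal boot, doing full init\n");
		return 0;
	}
	module_init(example_init);
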
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 01ce8c38bae635334b6b3b9d104f0bb26d5861fe..3815d84a1ef4b8beb71dab7c8a26d90834320ee9 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -451,7 +451,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 		pci_dev_put(pdev);
 	}
 
-	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+	DBG("non-PCI map for %llx, prot: %lx\n",
+	    (unsigned long long)offset, prot);
 
 	return __pgprot(prot);
 }
@@ -490,6 +491,131 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/* This provides legacy IO read access on a bus */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	switch(size) {
+	case 1:
+		*((u8 *)val) = in_8(addr);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = in_le16(addr);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = in_le32(addr);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO write access on a bus */
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	/* WARNING: The generic code is idiotic. It gets passed a pointer
+	 * to what can be a 1, 2 or 4 byte quantity and always reads that
+	 * as a u32, which means that we have to correct the location of
+	 * the data read within those 32 bits for size 1 and 2
+	 */
+	switch(size) {
+	case 1:
+		out_8(addr, val >> 24);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		out_le16(addr, val >> 16);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		out_le32(addr, val);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO or memory mmap access on a bus */
+int pci_mmap_legacy_page_range(struct pci_bus *bus,
+			       struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	resource_size_t offset =
+		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
+	resource_size_t size = vma->vm_end - vma->vm_start;
+	struct resource *rp;
+
+	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
+		 pci_domain_nr(bus), bus->number,
+		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
+		 (unsigned long long)offset,
+		 (unsigned long long)(offset + size - 1));
+
+	if (mmap_state == pci_mmap_mem) {
+		if ((offset + size) > hose->isa_mem_size)
+			return -ENXIO;
+		offset += hose->isa_mem_phys;
+	} else {
+		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+		unsigned long roffset = offset + io_offset;
+		rp = &hose->io_resource;
+		if (!(rp->flags & IORESOURCE_IO))
+			return -ENXIO;
+		if (roffset < rp->start || (roffset + size) > rp->end)
+			return -ENXIO;
+		offset += hose->io_base_phys;
+	}
+	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
+
+	vma->vm_pgoff = offset >> PAGE_SHIFT;
+	vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc,
 			  resource_size_t *start, resource_size_t *end)
@@ -592,6 +718,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		cpu_addr = of_translate_address(dev, ranges + 3);
 		size = of_read_number(ranges + pna + 3, 2);
 		ranges += np;
+
+		/* If we failed translation or got a zero-sized region
+		 * (some FW try to feed us with non sensical zero sized regions
+		 * such as power3 which look like some kind of attempt at exposing
+		 * the VGA memory hole)
+		 */
 		if (cpu_addr == OF_BAD_ADDR || size == 0)
 			continue;
@@ -665,6 +797,8 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 				isa_hole = memno;
 				if (primary || isa_mem_base == 0)
 					isa_mem_base = cpu_addr;
+				hose->isa_mem_phys = cpu_addr;
+				hose->isa_mem_size = size;
 			}
 
 			/* We get the PCI/Mem offset from the first range or
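pci_legacy_read(), pci_legacy_write() and pci_mmap_legacy_page_range() above are what the generic PCI sysfs code calls for the per-bus legacy_io and legacy_mem files once HAVE_PCI_LEGACY is defined. As a rough userspace sketch only, with the sysfs path and port number purely illustrative rather than taken from this patch, reading one byte of legacy I/O space could look like:

	/* Illustrative only: read one byte of legacy I/O space.
	 * The file offset is the port number and the read size selects
	 * the access width, mirroring pci_legacy_read() above.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char val;
		int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

		if (fd < 0 || pread(fd, &val, 1, 0x61) != 1) {
			perror("legacy_io");
			return 1;
		}
		printf("port 0x61 = 0x%02x\n", val);
		close(fd);
		return 0;
	}
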
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 98d7bf99533aff312712d747beed29141294608d..b9e1a1da6e52a35f2276c798b71e717bb224ec88 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -134,23 +134,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	return __add_pages(zone, start_pfn, nr_pages);
 }
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-	unsigned long start_pfn, end_pfn;
-	int ret;
-
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = start_pfn + (size >> PAGE_SHIFT);
-	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
-	if (ret)
-		goto out;
-	/* Arch-specific calls go here - next patch */
-out:
-	return ret;
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 /*
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 92d20e993ede097d5e732a2bf0b751f9931eb7d1..2ece399f2862361aaa0789f08a49a4bae169043e 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -232,6 +232,7 @@ static void __exit sputrace_exit(void)
 
 	remove_proc_entry("sputrace", NULL);
 	kfree(sputrace_log);
+	marker_synchronize_unregister();
 }
 
 module_init(sputrace_init);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bc581d8a7cd9c35995e6bb75ae1f17d101840de1..70b7645ce745aa03863360201183a832aec3f130 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,6 +78,8 @@ config S390
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Base setup"
 
 comment "Processor type and features"
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ea40a9d690fcc19407d8fd8a610d1af5b25a7a7d..de3fad60c6828d95c45a10944b2bc42a9b8fefb5 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -99,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_31BIT		18	/* 32bit process */
 #define TIF_MEMDIE		19
 #define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
+#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 
 #include
 #include
 #include
+#include
 #include
 #include
-#include
+#include